From a7eaa409e804f218aa06fd02d9166b9a5998b48a Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 2 Jul 2018 10:38:40 +0300 Subject: [PATCH 01/36] Fix TransportChangePasswordActionTests testIncorrectPasswordHashingAlgorithm is based on the assumption that the algorithm selected for the change password request is different from the one selected for the NativeUsersStore. pbkdf2_10000 is the same as pbkdf2 since 10000 is the default cost factor for pbkdf2 and thus should not be used as an option for the passwordHashingSettings. Also make sure that the same algorithm is used for settings and change password requests in other tests for consistency, even if we do not expect to reach the code where the algorithm is checked for now. Resolves #31696 Reverts 1c4f480794f2465c78e8e29645956f16971eeead --- .../TransportChangePasswordActionTests.java | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java index 33ed3fc5d976e..e512ad4f23fe7 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java @@ -47,18 +47,21 @@ public class TransportChangePasswordActionTests extends ESTestCase { public void testAnonymousUser() { + final String hashingAlgorithm = randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9"); Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build(); AnonymousUser anonymousUser = new AnonymousUser(settings); NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + Settings passwordHashingSettings = Settings.builder(). 
+ put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build(); + TransportService transportService = new TransportService(passwordHashingSettings, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportChangePasswordAction action = new TransportChangePasswordAction(settings, transportService, mock(ActionFilters.class), usersStore); ChangePasswordRequest request = new ChangePasswordRequest(); + // Request will fail before the request hashing algorithm is checked, but we use the same algorithm as in settings for consistency request.username(anonymousUser.principal()); - request.passwordHash(Hasher.resolve( - randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9")).hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + request.passwordHash(Hasher.resolve(hashingAlgorithm).hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); final AtomicReference throwableRef = new AtomicReference<>(); final AtomicReference responseRef = new AtomicReference<>(); @@ -81,16 +84,19 @@ public void onFailure(Exception e) { } public void testInternalUsers() { + final String hashingAlgorithm = randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9"); NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + Settings passwordHashingSettings = Settings.builder(). + put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build(); + TransportService transportService = new TransportService(passwordHashingSettings, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, transportService, mock(ActionFilters.class), usersStore); ChangePasswordRequest request = new ChangePasswordRequest(); request.username(randomFrom(SystemUser.INSTANCE.principal(), XPackUser.INSTANCE.principal())); - request.passwordHash(Hasher.resolve( - randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt", "bcrypt9")).hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); + // Request will fail before the request hashing algorithm is checked, but we use the same algorithm as in settings for consistency + request.passwordHash(Hasher.resolve(hashingAlgorithm).hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING)); final AtomicReference throwableRef = new AtomicReference<>(); final AtomicReference responseRef = new AtomicReference<>(); @@ -153,7 +159,6 @@ public void onFailure(Exception e) { verify(usersStore, times(1)).changePassword(eq(request), any(ActionListener.class)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31696") public void testIncorrectPasswordHashingAlgorithm() { final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); final Hasher hasher = Hasher.resolve(randomFrom("pbkdf2", "pbkdf2_1000", "bcrypt9", "bcrypt5")); @@ -166,7 +171,7 @@ public void testIncorrectPasswordHashingAlgorithm() { TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); Settings passwordHashingSettings = Settings.builder().put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), - randomFrom("pbkdf2_50000", "pbkdf2_10000", "bcrypt11", "bcrypt8", "bcrypt")).build(); + randomFrom("pbkdf2_50000", "pbkdf2_100000", 
"bcrypt11", "bcrypt8", "bcrypt")).build(); TransportChangePasswordAction action = new TransportChangePasswordAction(passwordHashingSettings, transportService, mock(ActionFilters.class), usersStore); action.doExecute(mock(Task.class), request, new ActionListener() { From 3baaa8012e4dc74a12229e828a0e189fff9618ce Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 2 Jul 2018 12:50:18 +0300 Subject: [PATCH 02/36] Revert long lines Introduced in a7eaa409e804f218aa06fd02d9166b9a5998b48a --- .../action/user/TransportChangePasswordActionTests.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java index e512ad4f23fe7..a88478c50ec7d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java @@ -53,8 +53,8 @@ public void testAnonymousUser() { NativeUsersStore usersStore = mock(NativeUsersStore.class); Settings passwordHashingSettings = Settings.builder(). put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build(); - TransportService transportService = new TransportService(passwordHashingSettings, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(passwordHashingSettings, null, null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportChangePasswordAction action = new TransportChangePasswordAction(settings, transportService, mock(ActionFilters.class), usersStore); @@ -88,8 +88,8 @@ public void testInternalUsers() { NativeUsersStore usersStore = mock(NativeUsersStore.class); Settings passwordHashingSettings = Settings.builder(). put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build(); - TransportService transportService = new TransportService(passwordHashingSettings, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(passwordHashingSettings, null, null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, transportService, mock(ActionFilters.class), usersStore); From 8e838ea12e37ae10db6f844a8bcf4a1158c89976 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Mon, 2 Jul 2018 12:48:04 +0100 Subject: [PATCH 03/36] [ML] Validate ML filter_id (#31535) Like job and datafeed ids, the filter id should be validated with the same rules to avoid document ids that can be problematic. 
--- .../xpack/core/ml/job/config/MlFilter.java | 9 +++++++- .../action/PutFilterActionRequestTests.java | 2 +- .../core/ml/job/config/MlFilterTests.java | 21 +++++++++++++++---- .../rest-api-spec/test/ml/filter_crud.yml | 12 +++++++++++ .../smoke-test-ml-with-security/build.gradle | 1 + 5 files changed, 39 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index b45ce73f124fd..c55ba401a2f0a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -15,6 +15,9 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ml.MlMetaIndex; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; import java.io.IOException; import java.util.Arrays; @@ -57,7 +60,7 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private final SortedSet items; private MlFilter(String id, String description, SortedSet items) { - this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); + this.id = Objects.requireNonNull(id); this.description = description; this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); } @@ -178,6 +181,10 @@ public Builder setItems(String... items) { } public MlFilter build() { + ExceptionsHelper.requireNonNull(id, MlFilter.ID.getPreferredName()); + if (!MlStrings.isValidId(id)) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INVALID_ID, ID.getPreferredName(), id)); + } return new MlFilter(id, description, items); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java index dfc3f5f37f40c..bed0ab775af12 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutFilterActionRequestTests.java @@ -12,7 +12,7 @@ public class PutFilterActionRequestTests extends AbstractStreamableXContentTestCase { - private final String filterId = randomAlphaOfLengthBetween(1, 20); + private final String filterId = MlFilterTests.randomValidFilterId(); @Override protected Request createTestInstance() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java index c8d8527dc0158..a89250330f046 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.core.ml.job.config; +import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.common.xcontent.json.JsonXContent; @@ -17,6 +19,7 @@ import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; public class MlFilterTests extends AbstractSerializingTestCase { @@ -30,7 +33,12 @@ protected MlFilter createTestInstance() { } public static MlFilter createRandom() { - return createRandom(randomAlphaOfLengthBetween(1, 20)); + return createRandom(randomValidFilterId()); + } + + public static String randomValidFilterId() { + CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()); + return generator.ofCodePointsLength(random(), 10, 10); } public static MlFilter createRandom(String filterId) { @@ -58,13 +66,13 @@ protected MlFilter doParseInstance(XContentParser parser) { } public void testNullId() { - NullPointerException ex = expectThrows(NullPointerException.class, () -> MlFilter.builder(null).build()); - assertEquals(MlFilter.ID.getPreferredName() + " must not be null", ex.getMessage()); + Exception ex = expectThrows(IllegalArgumentException.class, () -> MlFilter.builder(null).build()); + assertEquals("[filter_id] must not be null.", ex.getMessage()); } public void testNullItems() { NullPointerException ex = expectThrows(NullPointerException.class, - () -> MlFilter.builder(randomAlphaOfLength(20)).setItems((SortedSet) null).build()); + () -> MlFilter.builder(randomValidFilterId()).setItems((SortedSet) null).build()); assertEquals(MlFilter.ITEMS.getPreferredName() + " must not be null", ex.getMessage()); } @@ -89,6 +97,11 @@ public void testLenientParser() throws IOException { } } + public void testInvalidId() { + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> MlFilter.builder("Invalid id").build()); + assertThat(e.getMessage(), startsWith("Invalid filter_id; 'Invalid id' can contain lowercase")); + } + public void testItemsAreSorted() { MlFilter filter = MlFilter.builder("foo").setItems("c", "b", "a").build(); assertThat(filter.getItems(), contains("a", "b", "c")); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index d787e07b8c28c..6e9579a061339 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -109,6 +109,18 @@ setup: filter_id: "filter-foo" from: 0 size: 1 + +--- +"Test create filter given invalid filter_id": + - do: + catch: bad_request + xpack.ml.put_filter: + filter_id: Invalid + body: > + { + "description": "this id is invalid due to an upper case character" + } + --- "Test create filter api": - do: diff --git a/x-pack/qa/smoke-test-ml-with-security/build.gradle b/x-pack/qa/smoke-test-ml-with-security/build.gradle index 58e5eca3600f6..2a12aa2f28d3f 100644 --- a/x-pack/qa/smoke-test-ml-with-security/build.gradle +++ b/x-pack/qa/smoke-test-ml-with-security/build.gradle @@ -39,6 +39,7 @@ integTestRunner { 'ml/delete_model_snapshot/Test delete snapshot missing job_id', 'ml/delete_model_snapshot/Test delete with in-use model', 'ml/filter_crud/Test create filter api with mismatching body ID', + 'ml/filter_crud/Test create filter given invalid filter_id', 'ml/filter_crud/Test get filter API with bad ID', 'ml/filter_crud/Test invalid param combinations', 'ml/filter_crud/Test non-existing filter', From 
5d94003dc028280cf80c9b38e373dc73a5c511ba Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 2 Jul 2018 13:10:52 +0100 Subject: [PATCH 04/36] [DOCS] Check for Windows and *nix file paths (#31648) Proper cleanup of the docs snippet tests depends on detecting what is being tested (ML, Watcher, etc.). This is deduced from the file path, and so we must account for both Windows and Unix path separators --- .../elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java index 0196406c478cd..e534e4f41bbd6 100--- a/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java +++ b/x-pack/docs/src/test/java/org/elasticsearch/smoketest/XDocsClientYamlTestSuiteIT.java @@ -107,7 +107,7 @@ public void reenableWatcher() throws Exception { @Override protected boolean isWatcherTest() { String testName = getTestName(); - return testName != null && testName.contains("watcher/"); + return testName != null && (testName.contains("watcher/") || testName.contains("watcher\\")); } @Override @@ -118,13 +118,13 @@ protected boolean isMonitoringTest() { @Override protected boolean isMachineLearningTest() { String testName = getTestName(); - return testName != null && testName.contains("ml/"); + return testName != null && (testName.contains("ml/") || testName.contains("ml\\")); } @Override protected boolean isRollupTest() { String testName = getTestName(); - return testName != null && testName.contains("rollup/"); + return testName != null && (testName.contains("rollup/") || testName.contains("rollup\\")); } /** From 31aabe4bf9ea73cbb1c21322d9e9aa2a578b41a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 2 Jul 2018 15:14:44 +0200 Subject: [PATCH 05/36] Clean up double semicolon code typos (#31687) --- .../org/elasticsearch/index/rankeval/EvalQueryQuality.java | 2 +- .../http/netty4/Netty4HttpPipeliningHandler.java | 2 +- .../org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java | 2 +- .../org/elasticsearch/ingest/useragent/UserAgentParser.java | 2 +- .../cluster/routing/allocation/DiskThresholdSettings.java | 2 +- .../org/elasticsearch/common/geo/builders/CircleBuilder.java | 2 +- .../bucket/histogram/DateHistogramAggregator.java | 2 +- .../cluster/settings/ClusterUpdateSettingsRequestTests.java | 2 +- .../concurrent/QueueResizingEsThreadPoolExecutorTests.java | 2 +- .../org/elasticsearch/gateway/PrimaryShardAllocatorTests.java | 4 ++-- .../search/aggregations/bucket/MinDocCountIT.java | 2 +- .../bucket/composite/CompositeAggregatorTests.java | 2 +- .../search/basic/SearchWhileCreatingIndexIT.java | 2 +- .../org/elasticsearch/search/fetch/subphase/InnerHitsIT.java | 2 +- .../search/profile/SearchProfileShardResultsTests.java | 2 +- .../test/AbstractStreamableXContentTestCase.java | 2 +- .../java/org/elasticsearch/test/XContentTestUtilsTests.java | 2 +- .../xpack/core/ml/action/GetJobsStatsAction.java | 2 +- .../xpack/monitoring/exporter/http/HttpExporter.java | 4 ++-- .../xpack/security/transport/nio/SecurityNioTransport.java | 2 +- .../security/authc/esnative/ESNativeMigrateToolTests.java | 2 +- .../org/elasticsearch/xpack/sql/util/LikeConversionTests.java | 4 ++-- 22 files changed, 25 insertions(+), 25 deletions(-) diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java 
b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java index c683c54bfdd07..2ad3e589bd8c0 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java @@ -33,7 +33,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Objects;; +import java.util.Objects; /** * Result of the evaluation metric calculation on one particular query alone. diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index e6436ccea1a93..827963a2332c9 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -60,7 +60,7 @@ public void channelRead(final ChannelHandlerContext ctx, final Object msg) { @Override public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) { - assert msg instanceof Netty4HttpResponse : "Invalid message type: " + msg.getClass();; + assert msg instanceof Netty4HttpResponse : "Invalid message type: " + msg.getClass(); Netty4HttpResponse response = (Netty4HttpResponse) msg; boolean success = false; try { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java index 38d832d608051..3dad3c8a4373d 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java @@ -83,7 +83,7 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) assert msg instanceof Netty4HttpResponse : "Invalid message type: " + msg.getClass(); Netty4HttpResponse response = (Netty4HttpResponse) msg; setCorsResponseHeaders(response.getRequest().nettyRequest(), response, config); - ctx.write(response, promise);; + ctx.write(response, promise); } public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp, Netty4CorsConfig config) { diff --git a/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java b/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java index 2be4f84317f9e..f8aec041d742a 100644 --- a/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java +++ b/plugins/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java @@ -161,7 +161,7 @@ String getName() { } public Details parse(String agentString) { - Details details = cache.get(name, agentString);; + Details details = cache.get(name, agentString); if (details == null) { VersionedName userAgent = findMatch(uaPatterns, agentString); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index 58d93401508e4..fdba1c7009bc3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -56,7 +56,7 @@ public class DiskThresholdSettings { Setting.Property.Dynamic, Setting.Property.NodeScope); public static final Setting CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, - Setting.Property.Dynamic, Setting.Property.NodeScope);; + Setting.Property.Dynamic, Setting.Property.NodeScope); public static final Setting CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), Setting.Property.Dynamic, Setting.Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index 9c58877653e16..c6a0743980fd3 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -56,7 +56,7 @@ public CircleBuilder() { */ public CircleBuilder(StreamInput in) throws IOException { center(readFromStream(in)); - radius(in.readDouble(), DistanceUnit.readFromStream(in));; + radius(in.readDouble(), DistanceUnit.readFromStream(in)); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 94dc18eae63e2..e0b64d2cd5b9e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -75,7 +75,7 @@ class DateHistogramAggregator extends BucketsAggregator { this.rounding = rounding; this.shardRounding = shardRounding; this.offset = offset; - this.order = InternalOrder.validate(order, this);; + this.order = InternalOrder.validate(order, this); this.keyed = keyed; this.minDocCount = minDocCount; this.extendedBounds = extendedBounds; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java index 9701e76619824..06bd3dc26d8f6 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java @@ -31,7 +31,7 @@ import java.util.Collections; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo;; +import static org.hamcrest.CoreMatchers.equalTo; public class ClusterUpdateSettingsRequestTests extends ESTestCase { diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java index 8e69b8093597f..3f2c8fabec27b 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java @@ -131,7 +131,7 @@ public void testAutoQueueSizingWithMin() 
throws Exception { 5000); int threads = randomIntBetween(1, 5); - int measureWindow = randomIntBetween(10, 100);; + int measureWindow = randomIntBetween(10, 100); int min = randomIntBetween(4981, 4999); logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow); QueueResizingEsThreadPoolExecutor executor = diff --git a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index e3687548190a3..ae643b7f094c2 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -169,7 +169,7 @@ public void testShardLockObtainFailedExceptionPreferOtherValidCopies() { String allocId1 = randomAlphaOfLength(10); String allocId2 = randomAlphaOfLength(10); final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, - allocId1, allocId2);; + allocId1, allocId2); testAllocator.addData(node1, allocId1, randomBoolean(), new ShardLockObtainFailedException(shardId, "test")); testAllocator.addData(node2, allocId2, randomBoolean(), null); @@ -310,7 +310,7 @@ public void testFoundAllocationButThrottlingDecider() { public void testFoundAllocationButNoDecider() { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); - testAllocator.addData(node1, "allocId1", randomBoolean());; + testAllocator.addData(node1, "allocId1", randomBoolean()); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 015664109cdfe..af1104879e92e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -162,7 +162,7 @@ TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field) { // check that terms2 is a subset of terms1 private void assertSubset(Terms terms1, Terms terms2, long minDocCount, int size, String include) { - final Matcher matcher = include == null ? null : Pattern.compile(include).matcher("");; + final Matcher matcher = include == null ? 
null : Pattern.compile(include).matcher(""); final Iterator it1 = terms1.getBuckets().iterator(); final Iterator it2 = terms2.getBuckets().iterator(); int size2 = 0; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 953175018df65..0ed1dacb73f5e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -1457,7 +1457,7 @@ public void testWithKeywordAndTopHits() throws Exception { topHits = result.getBuckets().get(2).getAggregations().get("top_hits"); assertNotNull(topHits); assertEquals(topHits.getHits().getHits().length, 1); - assertEquals(topHits.getHits().getTotalHits(), 1L);; + assertEquals(topHits.getHits().getTotalHits(), 1L); } ); diff --git a/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index 4748b6292c417..23d18562b28c1 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -73,7 +73,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) logger.info("using preference {}", preference); // we want to make sure that while recovery happens, and a replica gets recovered, its properly refreshed - ClusterHealthStatus status = client().admin().cluster().prepareHealth("test").get().getStatus();; + ClusterHealthStatus status = client().admin().cluster().prepareHealth("test").get().getStatus(); while (status != ClusterHealthStatus.GREEN) { // first, verify that search normal search works SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet(); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java index 426db6f7425bb..802c343871e1f 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java @@ -474,7 +474,7 @@ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception { .innerHit(new InnerHitBuilder().setFetchSourceContext(new FetchSourceContext(false)))).get(); assertNoFailures(response); assertHitCount(response, 1); - hit = response.getHits().getAt(0);; + hit = response.getHits().getAt(0); assertThat(hit.getId(), equalTo("1")); messages = hit.getInnerHits().get("comments.messages"); assertThat(messages.getTotalHits(), equalTo(1L)); diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java index 7bc9b18860641..36841c08c9048 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileShardResultsTests.java @@ -40,7 +40,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static 
org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class SearchProfileShardResultsTests extends ESTestCase { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java index 4c9d2f7f95231..5bc9c66d4c9a7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractStreamableXContentTestCase.java @@ -42,7 +42,7 @@ public final void testFromXContent() throws IOException { /** * Parses to a new instance using the provided {@link XContentParser} */ - protected abstract T doParseInstance(XContentParser parser) throws IOException;; + protected abstract T doParseInstance(XContentParser parser) throws IOException; /** * Indicates whether the parser supports unknown fields or not. In case it does, such behaviour will be tested by diff --git a/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java index 62857fee9addb..c8fbd84f69aec 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/XContentTestUtilsTests.java @@ -40,7 +40,7 @@ import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.instanceOf;; +import static org.hamcrest.Matchers.instanceOf; public class XContentTestUtilsTests extends ESTestCase { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index 1ec9f0c473232..ad34f5611383f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -320,7 +320,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject();; + builder.startObject(); jobsStats.doXContentBody(builder, params); builder.endObject(); return builder; diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java index 38e8fbe1fccdc..ee583dd377085 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java @@ -350,7 +350,7 @@ static MultiHttpResource createResources(final Config config) { * @throws SettingsException if any setting is malformed or if no host is set */ private static HttpHost[] createHosts(final Config config) { - final List hosts = 
HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());; + final List hosts = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings()); String configKey = HOST_SETTING.getConcreteSettingForNamespace(config.name()).getKey(); if (hosts.isEmpty()) { @@ -446,7 +446,7 @@ private static void configureSecurity(final RestClientBuilder builder, final Con final Settings sslSettings = SSL_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings()); final SSLIOSessionStrategy sslStrategy = sslService.sslIOSessionStrategy(sslSettings); final CredentialsProvider credentialsProvider = createCredentialsProvider(config); - List hostList = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());; + List hostList = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings()); // sending credentials in plaintext! if (credentialsProvider != null && hostList.stream().findFirst().orElse("").startsWith("https") == false) { logger.warn("exporter [{}] is not using https, but using user authentication with plaintext " + diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java index fd1b1198607d1..874dc36a31cce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -130,7 +130,7 @@ public NioTcpChannel createChannel(NioSelector selector, SocketChannel channel) @Override public NioTcpServerChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { - NioTcpServerChannel nioChannel = new NioTcpServerChannel(profileName, channel);; + NioTcpServerChannel nioChannel = new NioTcpServerChannel(profileName, channel); Consumer exceptionHandler = (e) -> onServerException(nioChannel, e); Consumer acceptor = SecurityNioTransport.this::acceptChannel; ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java index da48136e0a3d3..14b0a58419a22 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java @@ -142,7 +142,7 @@ public void testRetrieveRoles() throws Exception { OptionParser parser = muor.getParser(); OptionSet options = parser.parse("-u", username, "-p", password, "-U", url); Set roles = muor.getRolesThatExist(t, settings, new Environment(settings, conf), options); - logger.info("--> output: \n{}", t.getOutput());; + logger.info("--> output: \n{}", t.getOutput()); for (String r : addedRoles) { assertThat("expected list to contain: " + r, roles.contains(r), is(true)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java index d363eb5274b23..19a544c14e50b 100644 --- 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/util/LikeConversionTests.java @@ -8,7 +8,7 @@ import org.elasticsearch.test.ESTestCase; import static org.elasticsearch.xpack.sql.util.StringUtils.likeToJavaPattern; -import static org.elasticsearch.xpack.sql.util.StringUtils.likeToLuceneWildcard;; +import static org.elasticsearch.xpack.sql.util.StringUtils.likeToLuceneWildcard; public class LikeConversionTests extends ESTestCase { @@ -103,4 +103,4 @@ public void testWildcardTripleEscaping() { public void testWildcardIgnoreDoubleEscapedButSkipEscapingOfSql() { assertEquals("foo\\\\\\*bar\\\\?\\?", wildcard("foo\\*bar\\_?")); } -} \ No newline at end of file +} From 1dd10fe69f75c979292f3f0db27ef6671d8cce54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Campinas?= Date: Mon, 2 Jul 2018 15:17:31 +0200 Subject: [PATCH 06/36] [Docs] Correct typos (#31720) --- docs/plugins/repository-gcs.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index 8cf2bc0a73c92..84fb47d1761e4 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -27,9 +27,9 @@ To create a new bucket: 1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] 2. Select your project -3. Got to the https://console.cloud.google.com/storage/browser[Storage Browser] +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser] 4. Click the "Create Bucket" button -5. Enter a the name of the new bucket +5. Enter the name of the new bucket 6. Select a storage class 7. Select a location 8. Click the "Create" button From c55d11f8b547cf959c56805f2ecc15e6772d9e5d Mon Sep 17 00:00:00 2001 From: Sohaib Iftikhar Date: Mon, 2 Jul 2018 19:25:17 +0200 Subject: [PATCH 07/36] rest-high-level: added get cluster settings (#31706) Relates to #27205 --- .../elasticsearch/client/ClusterClient.java | 31 ++++ .../client/RequestConverters.java | 16 +- .../elasticsearch/client/ClusterClientIT.java | 43 +++++ .../client/ESRestHighLevelClientTestCase.java | 10 ++ .../client/RequestConvertersTests.java | 42 +++-- .../ClusterClientDocumentationIT.java | 68 ++++++++ .../high-level/cluster/get_settings.asciidoc | 92 ++++++++++ .../high-level/supported-apis.asciidoc | 2 + docs/reference/cluster.asciidoc | 2 + docs/reference/cluster/get-settings.asciidoc | 20 +++ .../settings/ClusterGetSettingsRequest.java | 48 +++++ .../settings/ClusterGetSettingsResponse.java | 165 ++++++++++++++++++ .../cluster/RestClusterGetSettingsAction.java | 28 ++- .../ClusterGetSettingsResponseTests.java | 57 ++++++ 14 files changed, 591 insertions(+), 33 deletions(-) create mode 100644 docs/java-rest/high-level/cluster/get_settings.asciidoc create mode 100644 docs/reference/cluster/get-settings.asciidoc create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index b3075a2fddbd5..b72a21ed7d1c4 100644 --- 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.rest.RestStatus; @@ -72,6 +74,35 @@ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsR options, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet()); } + /** + * Get the cluster wide settings using the Cluster Get Settings API. + * See Cluster Get Settings + * API on elastic.co + * @param clusterGetSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ClusterGetSettingsResponse getSettings(ClusterGetSettingsRequest clusterGetSettingsRequest, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(clusterGetSettingsRequest, RequestConverters::clusterGetSettings, + options, ClusterGetSettingsResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get the cluster wide settings using the Cluster Get Settings API. + * See Cluster Get Settings + * API on elastic.co + * @param clusterGetSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getSettingsAsync(ClusterGetSettingsRequest clusterGetSettingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(clusterGetSettingsRequest, RequestConverters::clusterGetSettings, + options, ClusterGetSettingsResponse::fromXContent, listener, emptySet()); + } + /** * Get cluster health using the Cluster Health API. 
* See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 839d86bf9f10a..ee4b56e8a3b85 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -36,6 +36,7 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; @@ -605,7 +606,7 @@ static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throw request.setEntity(createEntity(searchTemplateRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } - + static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplateRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch/template"); @@ -619,7 +620,7 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat byte[] source = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, xContent); request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; - } + } static Request existsAlias(GetAliasesRequest getAliasesRequest) { if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) && @@ -709,6 +710,17 @@ static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSett return request; } + static Request clusterGetSettings(ClusterGetSettingsRequest clusterGetSettingsRequest) throws IOException { + Request request = new Request(HttpGet.METHOD_NAME, "/_cluster/settings"); + + Params parameters = new Params(request); + parameters.withLocal(clusterGetSettingsRequest.local()); + parameters.withIncludeDefaults(clusterGetSettingsRequest.includeDefaults()); + parameters.withMasterTimeout(clusterGetSettingsRequest.masterNodeTimeout()); + + return request; + } + static Request getPipeline(GetPipelineRequest getPipelineRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_ingest/pipeline") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 2a870ec65eea1..58b4b268788b5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -22,6 +22,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import 
org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -42,6 +44,7 @@ import static java.util.Collections.emptyMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -112,6 +115,46 @@ public void testClusterUpdateSettingNonExistent() { "Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]")); } + public void testClusterGetSettings() throws IOException { + final String transientSettingKey = RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(); + final int transientSettingValue = 10; + + final String persistentSettingKey = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); + final String persistentSettingValue = EnableAllocationDecider.Allocation.NONE.name(); + + Settings transientSettings = + Settings.builder().put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES).build(); + Settings persistentSettings = Settings.builder().put(persistentSettingKey, persistentSettingValue).build(); + clusterUpdateSettings(persistentSettings, transientSettings); + + ClusterGetSettingsRequest request = new ClusterGetSettingsRequest(); + ClusterGetSettingsResponse response = execute( + request, highLevelClient().cluster()::getSettings, highLevelClient().cluster()::getSettingsAsync); + assertEquals(persistentSettings, response.getPersistentSettings()); + assertEquals(transientSettings, response.getTransientSettings()); + assertEquals(0, response.getDefaultSettings().size()); + } + + public void testClusterGetSettingsWithDefault() throws IOException { + final String transientSettingKey = RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(); + final int transientSettingValue = 10; + + final String persistentSettingKey = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); + final String persistentSettingValue = EnableAllocationDecider.Allocation.NONE.name(); + + Settings transientSettings = + Settings.builder().put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES).build(); + Settings persistentSettings = Settings.builder().put(persistentSettingKey, persistentSettingValue).build(); + clusterUpdateSettings(persistentSettings, transientSettings); + + ClusterGetSettingsRequest request = new ClusterGetSettingsRequest().includeDefaults(true); + ClusterGetSettingsResponse response = execute( + request, highLevelClient().cluster()::getSettings, highLevelClient().cluster()::getSettingsAsync); + assertEquals(persistentSettings, response.getPersistentSettings()); + assertEquals(transientSettings, response.getTransientSettings()); + assertThat(response.getDefaultSettings().size(), greaterThan(0)); + } + public void testClusterHealthGreen() throws IOException { ClusterHealthRequest request = new ClusterHealthRequest(); request.timeout("5s"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 69fbab30c336c..e05fa9fa79b90 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -20,9 +20,11 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.ingest.Pipeline; @@ -126,4 +128,12 @@ protected static void createPipeline(String pipelineId) throws IOException { protected static void createPipeline(PutPipelineRequest putPipelineRequest) throws IOException { assertOK(client().performRequest(RequestConverters.putPipeline(putPipelineRequest))); } + + protected static void clusterUpdateSettings(Settings persistentSettings, + Settings transientSettings) throws IOException { + ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + request.persistentSettings(persistentSettings); + request.transientSettings(transientSettings); + assertOK(client().performRequest(RequestConverters.clusterPutSettings(request))); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index e838989a0c853..ce72ecc8a59b7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; @@ -1374,42 +1375,42 @@ public void testRenderSearchTemplate() throws Exception { assertEquals(Collections.emptyMap(), request.getParameters()); assertToXContentBody(searchTemplateRequest, request.getEntity()); } - + public void testMultiSearchTemplate() throws Exception { final int numSearchRequests = randomIntBetween(1, 10); MultiSearchTemplateRequest multiSearchTemplateRequest = new MultiSearchTemplateRequest(); - + for (int i = 0; i < numSearchRequests; i++) { // Create a random request. 
String[] indices = randomIndicesNames(0, 5); SearchRequest searchRequest = new SearchRequest(indices); - + Map expectedParams = new HashMap<>(); setRandomSearchParams(searchRequest, expectedParams); - + // scroll is not supported in the current msearch or msearchtemplate api, so unset it: searchRequest.scroll((Scroll) null); // batched reduce size is currently not set-able on a per-request basis as it is a query string parameter only searchRequest.setBatchedReduceSize(SearchRequest.DEFAULT_BATCHED_REDUCE_SIZE); - + setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); - + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(searchRequest); - + searchTemplateRequest.setScript("{\"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" }}}"); searchTemplateRequest.setScriptType(ScriptType.INLINE); searchTemplateRequest.setProfile(randomBoolean()); - + Map scriptParams = new HashMap<>(); scriptParams.put("field", "name"); scriptParams.put("value", randomAlphaOfLengthBetween(2, 5)); searchTemplateRequest.setScriptParams(scriptParams); - - multiSearchTemplateRequest.add(searchTemplateRequest); + + multiSearchTemplateRequest.add(searchTemplateRequest); } Request multiRequest = RequestConverters.multiSearchTemplate(multiSearchTemplateRequest); - + assertEquals(HttpPost.METHOD_NAME, multiRequest.getMethod()); assertEquals("/_msearch/template", multiRequest.getEndpoint()); List searchRequests = multiSearchTemplateRequest.requests(); @@ -1418,9 +1419,9 @@ public void testMultiSearchTemplate() throws Exception { HttpEntity actualEntity = multiRequest.getEntity(); byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent()); assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); - assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity))); + assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity))); } - + public void testExistsAlias() { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 5); @@ -1636,6 +1637,21 @@ public void testClusterPutSettings() throws IOException { assertEquals(expectedParams, expectedRequest.getParameters()); } + public void testClusterGetSettings() throws IOException { + ClusterGetSettingsRequest request = new ClusterGetSettingsRequest(); + Map expectedParams = new HashMap<>(); + setRandomMasterTimeout(request, expectedParams); + request.includeDefaults(randomBoolean()); + if (request.includeDefaults()) { + expectedParams.put("include_defaults", String.valueOf(true)); + } + + Request expectedRequest = RequestConverters.clusterGetSettings(request); + assertEquals("/_cluster/settings", expectedRequest.getEndpoint()); + assertEquals(HttpGet.METHOD_NAME, expectedRequest.getMethod()); + assertEquals(expectedParams, expectedRequest.getParameters()); + } + public void testPutPipeline() throws IOException { String pipelineId = "some_pipeline_id"; PutPipelineRequest request = new PutPipelineRequest( diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 84a124f764b38..dedd50096f826 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -49,6 +51,7 @@ import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; /** @@ -189,6 +192,71 @@ public void onFailure(Exception e) { } } + public void testClusterGetSettings() throws IOException { + RestHighLevelClient client = highLevelClient(); + + // tag::get-settings-request + ClusterGetSettingsRequest request = new ClusterGetSettingsRequest(); + // end::get-settings-request + + // tag::get-settings-request-includeDefaults + request.includeDefaults(true); // <1> + // end::get-settings-request-includeDefaults + + // tag::get-settings-request-local + request.local(true); // <1> + // end::get-settings-request-local + + // tag::get-settings-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::get-settings-request-masterTimeout + + // tag::get-settings-execute + ClusterGetSettingsResponse response = client.cluster().getSettings(request, RequestOptions.DEFAULT); // <1> + // end::get-settings-execute + + // tag::get-settings-response + Settings persistentSettings = response.getPersistentSettings(); // <1> + Settings transientSettings = response.getTransientSettings(); // <2> + Settings defaultSettings = response.getDefaultSettings(); // <3> + String settingValue = 
response.getSetting("cluster.routing.allocation.enable"); // <4> + // end::get-settings-response + + assertThat(defaultSettings.size(), greaterThan(0)); + } + + public void testClusterGetSettingsAsync() throws InterruptedException { + RestHighLevelClient client = highLevelClient(); + + ClusterGetSettingsRequest request = new ClusterGetSettingsRequest(); + + // tag::get-settings-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(ClusterGetSettingsResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-settings-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-settings-execute-async + client.cluster().getSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::get-settings-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + public void testClusterHealth() throws IOException { RestHighLevelClient client = highLevelClient(); client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); diff --git a/docs/java-rest/high-level/cluster/get_settings.asciidoc b/docs/java-rest/high-level/cluster/get_settings.asciidoc new file mode 100644 index 0000000000000..999bd92d79117 --- /dev/null +++ b/docs/java-rest/high-level/cluster/get_settings.asciidoc @@ -0,0 +1,92 @@ +[[java-rest-high-cluster-get-settings]] +=== Cluster Get Settings API + +The Cluster Get Settings API allows to get the cluster wide settings. + +[[java-rest-high-cluster-get-settings-request]] +==== Cluster Get Settings Request + +A `ClusterGetSettingsRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-request] +-------------------------------------------------- + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-request-includeDefaults] +-------------------------------------------------- +<1> By default only those settings that were explicitly set are returned. Setting this to true also returns +the default settings. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-request-local] +-------------------------------------------------- +<1> By default the request goes to the master of the cluster to get the latest results. If local is specified it gets +the results from whichever node the request goes to. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-cluster-get-settings-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-execute] +-------------------------------------------------- +<1> Execute the request and get back the response in a `ClusterGetSettingsResponse` object. + +[[java-rest-high-cluster-get-settings-async]] +==== Asynchronous Execution + +The asynchronous execution of a cluster get settings requires both the +`ClusterGetSettingsRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-execute-async] +-------------------------------------------------- +<1> The `ClusterGetSettingsRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `ClusterGetSettingsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. The raised exception is provided as an argument + +[[java-rest-high-cluster-get-settings-response]] +==== Cluster Get Settings Response + +The returned `ClusterGetSettingsResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-settings-response] +-------------------------------------------------- +<1> Get the persistent settings. +<2> Get the transient settings. +<3> Get the default settings (returns empty settings if `includeDefaults` was not set to `true`). +<4> Get the value as a `String` for a particular setting. The order of searching is first in `persistentSettings` then in +`transientSettings` and finally, if not found in either, in `defaultSettings`. 
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index d2484db1d7860..93513a042adec 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -118,9 +118,11 @@ include::indices/get_templates.asciidoc[] The Java High Level REST Client supports the following Cluster APIs: * <<java-rest-high-cluster-put-settings>> +* <<java-rest-high-cluster-get-settings>> * <<java-rest-high-cluster-health>> include::cluster/put_settings.asciidoc[] +include::cluster/get_settings.asciidoc[] include::cluster/health.asciidoc[] == Ingest APIs diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index 58e36d0e6795c..f093b6ebcfae0 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -42,6 +42,8 @@ include::cluster/reroute.asciidoc[] include::cluster/update-settings.asciidoc[] +include::cluster/get-settings.asciidoc[] + include::cluster/nodes-stats.asciidoc[] include::cluster/nodes-info.asciidoc[] diff --git a/docs/reference/cluster/get-settings.asciidoc b/docs/reference/cluster/get-settings.asciidoc new file mode 100644 index 0000000000000..b6ea5db1f6dbb --- /dev/null +++ b/docs/reference/cluster/get-settings.asciidoc @@ -0,0 +1,20 @@ +[[cluster-get-settings]] +== Cluster Get Settings + +The cluster get settings API allows retrieval of the cluster-wide settings. + +[source,js] +-------------------------------------------------- +GET /_cluster/settings +-------------------------------------------------- +// CONSOLE + +Or +[source,js] +-------------------------------------------------- +GET /_cluster/settings?include_defaults=true +-------------------------------------------------- +// CONSOLE + +In the second example above, the parameter `include_defaults` ensures that the settings which were not set explicitly +are also returned. By default, `include_defaults` is set to `false`. \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java new file mode 100644 index 0000000000000..7048b60fc2a6b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeReadRequest; + +/** + * This request is specific to the REST client. {@link org.elasticsearch.action.admin.cluster.state.ClusterStateRequest} + * is used on the transport layer.
+ */ +public class ClusterGetSettingsRequest extends MasterNodeReadRequest { + private boolean includeDefaults = false; + + @Override + public ActionRequestValidationException validate() { + return null; + } + + /** + * When include_defaults is set, return default settings which are normally suppressed. + */ + public ClusterGetSettingsRequest includeDefaults(boolean includeDefaults) { + this.includeDefaults = includeDefaults; + return this; + } + + public boolean includeDefaults() { + return includeDefaults; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java new file mode 100644 index 0000000000000..19b0517d96c95 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponse.java @@ -0,0 +1,165 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * This response is specific to the REST client. {@link org.elasticsearch.action.admin.cluster.state.ClusterStateResponse} + * is used on the transport layer. + */ +public class ClusterGetSettingsResponse extends ActionResponse implements ToXContentObject { + + private Settings persistentSettings = Settings.EMPTY; + private Settings transientSettings = Settings.EMPTY; + private Settings defaultSettings = Settings.EMPTY; + + static final String PERSISTENT_FIELD = "persistent"; + static final String TRANSIENT_FIELD = "transient"; + static final String DEFAULTS_FIELD = "defaults"; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>( + "cluster_get_settings_response", + true, + a -> { + Settings defaultSettings = a[2] == null ? 
Settings.EMPTY : (Settings) a[2]; + return new ClusterGetSettingsResponse((Settings) a[0], (Settings) a[1], defaultSettings); + } + ); + static { + PARSER.declareObject(constructorArg(), (p, c) -> Settings.fromXContent(p), new ParseField(PERSISTENT_FIELD)); + PARSER.declareObject(constructorArg(), (p, c) -> Settings.fromXContent(p), new ParseField(TRANSIENT_FIELD)); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> Settings.fromXContent(p), new ParseField(DEFAULTS_FIELD)); + } + + public ClusterGetSettingsResponse(Settings persistentSettings, Settings transientSettings, Settings defaultSettings) { + if (persistentSettings != null) { + this.persistentSettings = persistentSettings; + } + if (transientSettings != null) { + this.transientSettings = transientSettings; + } + if (defaultSettings != null) { + this.defaultSettings = defaultSettings; + } + } + + /** + * Returns the persistent settings for the cluster + * @return Settings + */ + public Settings getPersistentSettings() { + return persistentSettings; + } + + /** + * Returns the transient settings for the cluster + * @return Settings + */ + public Settings getTransientSettings() { + return transientSettings; + } + + /** + * Returns the default settings for the cluster (only if {@code include_defaults} was set to true in the request) + * @return Settings + */ + public Settings getDefaultSettings() { + return defaultSettings; + } + + /** + * Returns the string value of the specified setting. The order of search is first + * in persistent settings, then in transient settings, and finally in default settings. + * @param setting the name of the setting to get + * @return String + */ + public String getSetting(String setting) { + if (persistentSettings.hasValue(setting)) { + return persistentSettings.get(setting); + } else if (transientSettings.hasValue(setting)) { + return transientSettings.get(setting); + } else if (defaultSettings.hasValue(setting)) { + return defaultSettings.get(setting); + } else { + return null; + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + builder.startObject(PERSISTENT_FIELD); + persistentSettings.toXContent(builder, params); + builder.endObject(); + + builder.startObject(TRANSIENT_FIELD); + transientSettings.toXContent(builder, params); + builder.endObject(); + + if (defaultSettings.isEmpty() == false) { + builder.startObject(DEFAULTS_FIELD); + defaultSettings.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + public static ClusterGetSettingsResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterGetSettingsResponse that = (ClusterGetSettingsResponse) o; + return Objects.equals(transientSettings, that.transientSettings) && + Objects.equals(persistentSettings, that.persistentSettings) && + Objects.equals(defaultSettings, that.defaultSettings); + } + + @Override + public int hashCode() { + return Objects.hash(transientSettings, persistentSettings, defaultSettings); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index
f9716d8d1bade..b452b62eb5e95 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest.action.admin.cluster; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Requests; @@ -65,6 +66,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .nodes(false); final boolean renderDefaults = request.paramAsBoolean("include_defaults", false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); + clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); return channel -> client.admin().cluster().state(clusterStateRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception { @@ -85,23 +87,13 @@ public boolean canTripCircuitBreaker() { private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - - builder.startObject("persistent"); - state.metaData().persistentSettings().toXContent(builder, params); - builder.endObject(); - - builder.startObject("transient"); - state.metaData().transientSettings().toXContent(builder, params); - builder.endObject(); - - if (renderDefaults) { - builder.startObject("defaults"); - settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)).toXContent(builder, params); - builder.endObject(); - } - - builder.endObject(); - return builder; + return + new ClusterGetSettingsResponse( + state.metaData().persistentSettings(), + state.metaData().transientSettings(), + renderDefaults ? + settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)) : + Settings.EMPTY + ).toXContent(builder, params); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponseTests.java new file mode 100644 index 0000000000000..1b307e0713c3b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsResponseTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.function.Predicate; + +public class ClusterGetSettingsResponseTests extends AbstractXContentTestCase { + + @Override + protected ClusterGetSettingsResponse doParseInstance(XContentParser parser) throws IOException { + return ClusterGetSettingsResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected ClusterGetSettingsResponse createTestInstance() { + Settings persistentSettings = ClusterUpdateSettingsResponseTests.randomClusterSettings(0, 2); + Settings transientSettings = ClusterUpdateSettingsResponseTests.randomClusterSettings(0, 2); + Settings defaultSettings = randomBoolean() ? + ClusterUpdateSettingsResponseTests.randomClusterSettings(0, 2): Settings.EMPTY; + return new ClusterGetSettingsResponse(persistentSettings, transientSettings, defaultSettings); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return p -> + p.startsWith(ClusterGetSettingsResponse.TRANSIENT_FIELD) || + p.startsWith(ClusterGetSettingsResponse.PERSISTENT_FIELD) || + p.startsWith(ClusterGetSettingsResponse.DEFAULTS_FIELD); + } +} From ea15284230fbbb574b57ee5da7d49ed44c3cb152 Mon Sep 17 00:00:00 2001 From: Peter Evers Date: Mon, 2 Jul 2018 20:12:49 +0200 Subject: [PATCH 08/36] Docs: Match the examples in the description (#31710) Prose drifted from snippet. --- .../aggregations/metrics/percentile-aggregation.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc index 4ca9c849b9b61..1903bbc6bcadf 100644 --- a/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/percentile-aggregation.asciidoc @@ -69,8 +69,8 @@ percentiles: `[ 1, 5, 25, 50, 75, 95, 99 ]`. The response will look like this: As you can see, the aggregation will return a calculated value for each percentile in the default range. If we assume response times are in milliseconds, it is -immediately obvious that the webpage normally loads in 10-723ms, but occasionally -spikes to 941-980ms. +immediately obvious that the webpage normally loads in 10-725ms, but occasionally +spikes to 945-985ms. Often, administrators are only interested in outliers -- the extreme percentiles. We can specify just the percents we are interested in (requested percentiles From 8f2feb84143aff46225f0bca84d6f7d0d40f99fc Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Mon, 2 Jul 2018 20:35:26 +0200 Subject: [PATCH 09/36] Watcher: Fix chain input toXcontent serialization (#31721) The xcontent parameters were not passed to the xcontent serialization of the chain input for each chain. This could lead to wrongly stored watches, which did not contain passwords but only their redacted counterparts, when an input inside of a chain input contained a password. 
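The one-line fix in `ChainInput#toXContent` below is easier to see against the general pattern. The following is a minimal sketch of that pattern, using a hypothetical wrapper type rather than the actual watcher classes: any `ToXContent` implementation that serializes nested fragments must forward its `Params`, otherwise request-scoped flags (such as the one controlling secret redaction) are silently dropped.

["source","java"]
--------------------------------------------------
import java.io.IOException;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

// Hypothetical wrapper type, used only to illustrate the pattern.
class WrapperExample implements ToXContentObject {
    private final ToXContentObject inner;

    WrapperExample(ToXContentObject inner) {
        this.inner = inner;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        // Buggy variant: builder.field("inner", inner) serializes the nested
        // fragment with empty params, so it never sees the caller's flags.
        builder.field("inner", inner, params); // fixed: forward the caller's params
        return builder.endObject();
    }
}
--------------------------------------------------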
--- .../watcher/get_watch/30_with_chain_input.yml | 51 +++++++++++++++++++ .../xpack/watcher/input/chain/ChainInput.java | 2 +- .../watcher/input/chain/ChainInputTests.java | 24 +++++++++ 3 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml new file mode 100644 index 0000000000000..81a12fe6f7ddb --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/30_with_chain_input.yml @@ -0,0 +1,51 @@ +--- +"Test get watch api with chained input and basic auth": + - do: + cluster.health: + wait_for_status: yellow + + - do: + xpack.watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "cron": "0 0 0 1 * ? 2099" + } + }, + "input": { + "chain": { + "inputs": [ + { + "http": { + "http": { + "request": { + "url" : "http://localhost/", + "auth": { + "basic": { + "username": "Username123", + "password": "Password123" + } + } + } + } + } + } + ] + } + }, + "actions": { + "logging": { + "logging": { + "text": "logging statement here" + } + } + } + } + + - do: + xpack.watcher.get_watch: + id: "my_watch" + - match: { found : true} + - match: { _id: "my_watch" } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInput.java index 3c62f4d1066d2..1599531429bf5 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInput.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/chain/ChainInput.java @@ -41,7 +41,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startArray(INPUTS.getPreferredName()); for (Tuple tuple : inputs) { builder.startObject().startObject(tuple.v1()); - builder.field(tuple.v2().type(), tuple.v2()); + builder.field(tuple.v2().type(), tuple.v2(), params); builder.endObject().endObject(); } builder.endArray(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputTests.java index e654452779ab8..cc19cef7b4768 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/input/chain/ChainInputTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -18,6 +19,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.input.Input; import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate; import 
org.elasticsearch.xpack.watcher.common.http.auth.basic.BasicAuth; @@ -29,6 +31,7 @@ import org.elasticsearch.xpack.watcher.input.simple.SimpleInputFactory; import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -46,6 +49,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; public class ChainInputTests extends ESTestCase { @@ -220,4 +224,24 @@ public void testParsingShouldBeStrictWhenStartingInputs() throws Exception { expectThrows(ElasticsearchParseException.class, () -> chainInputFactory.parseInput("test", parser)); assertThat(e.getMessage(), containsString("Expected starting JSON object after [first] in watch [test]")); } + + public void testThatXContentParametersArePassedToInputs() throws Exception { + ToXContent.Params randomParams = new ToXContent.MapParams(Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5))); + ChainInput chainInput = new ChainInput(Collections.singletonList(Tuple.tuple("whatever", new Input() { + @Override + public String type() { + return "test"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) { + assertThat(params, sameInstance(randomParams)); + return builder; + } + }))); + + try (XContentBuilder builder = jsonBuilder()) { + chainInput.toXContent(builder, randomParams); + } + } } From 631a53a0e1d6be928f6c7bb55bc4c64de5bba695 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 2 Jul 2018 14:44:36 -0700 Subject: [PATCH 10/36] Painless: Add Context Docs (#31190) Adds documentation for each of the variables and the API available with each script context usable with a Painless script.
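The docs below carry no code changes, but one concrete invocation helps anchor the terminology: the API that carries a script is what selects its context. A minimal sketch using the update context, with a hypothetical index, id, and field name:

["source","java"]
--------------------------------------------------
import java.util.Collections;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

// The update API provides ctx['_source'] and ctx['_op'] to the script,
// exactly as documented in the update context below.
UpdateRequest update = new UpdateRequest("test", "_doc", "1");
update.script(new Script(
        ScriptType.INLINE,
        "painless",
        "ctx._source.counter += params.count",
        Collections.singletonMap("count", 4)));
--------------------------------------------------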
--- docs/painless/index.asciidoc | 4 +- docs/painless/painless-contexts.asciidoc | 58 ++++++++++++++++ .../painless/painless-contexts/index.asciidoc | 35 ++++++++++ .../painless-bucket-agg-context.asciidoc | 21 ++++++ .../painless-field-context.asciidoc | 31 +++++++++ .../painless-filter-context.asciidoc | 26 +++++++ ...painless-ingest-processor-context.asciidoc | 41 +++++++++++ ...inless-metric-agg-combine-context.asciidoc | 27 ++++++++ .../painless-metric-agg-init-context.asciidoc | 32 +++++++++ .../painless-metric-agg-map-context.asciidoc | 47 +++++++++++++ ...ainless-metric-agg-reduce-context.asciidoc | 28 ++++++++ ...painless-min-should-match-context.asciidoc | 28 ++++++++ .../painless-reindex-context.asciidoc | 68 +++++++++++++++++++ .../painless-score-context.asciidoc | 27 ++++++++ .../painless-similarity-context.asciidoc | 53 +++++++++++++++ .../painless-sort-context.asciidoc | 26 +++++++ .../painless-update-by-query-context.asciidoc | 54 +++++++++++++++ .../painless-update-context.asciidoc | 55 +++++++++++++++ ...ainless-watcher-condition-context.asciidoc | 38 +++++++++++ ...ainless-watcher-transform-context.asciidoc | 39 +++++++++++ .../painless-weight-context.asciidoc | 42 ++++++++++++ .../painless-operators-array.asciidoc | 2 +- docs/painless/painless-scripts.asciidoc | 2 +- 23 files changed, 781 insertions(+), 3 deletions(-) create mode 100644 docs/painless/painless-contexts.asciidoc create mode 100644 docs/painless/painless-contexts/index.asciidoc create mode 100644 docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-field-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-filter-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-min-should-match-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-reindex-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-score-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-similarity-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-sort-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-update-by-query-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-update-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc create mode 100644 docs/painless/painless-contexts/painless-weight-context.asciidoc diff --git a/docs/painless/index.asciidoc b/docs/painless/index.asciidoc index abfd4d4f00abe..92e0a33bf1347 100644 --- a/docs/painless/index.asciidoc +++ b/docs/painless/index.asciidoc @@ -7,4 +7,6 @@ include::painless-getting-started.asciidoc[] include::painless-lang-spec.asciidoc[] -include::painless-api-reference.asciidoc[] +include::painless-contexts.asciidoc[] + +include::painless-api-reference.asciidoc[] \ No newline at end of file diff --git 
a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc new file mode 100644 index 0000000000000..ff46f6bd74dde --- /dev/null +++ b/docs/painless/painless-contexts.asciidoc @@ -0,0 +1,58 @@ +[[painless-contexts]] +== Painless contexts + +:es_version: https://www.elastic.co/guide/en/elasticsearch/reference/master +:xp_version: https://www.elastic.co/guide/en/x-pack/current + +A Painless script is evaluated within a context. Each context has values that +are available as local variables, a whitelist that controls the available +classes and the methods and fields within those classes (the API), and +whether a value is returned and, if so, of what type. + +A Painless script is typically executed within one of the contexts in the table +below. Note this is not necessarily a comprehensive list as custom plugins and +specialized code may define new ways to use a Painless script. + +[options="header",cols="<1,<1,<1"] +|==== +| Name | Painless Documentation + | Elasticsearch Documentation +| Update | <<painless-update-context>> + | {es_version}/docs-update.html[Elasticsearch Documentation] +| Update by query | <<painless-update-by-query-context>> + | {es_version}/docs-update-by-query.html[Elasticsearch Documentation] +| Reindex | <<painless-reindex-context>> + | {es_version}/docs-reindex.html[Elasticsearch Documentation] +| Sort | <<painless-sort-context>> + | {es_version}/search-request-sort.html[Elasticsearch Documentation] +| Similarity | <<painless-similarity-context>> + | {es_version}/index-modules-similarity.html[Elasticsearch Documentation] +| Weight | <<painless-weight-context>> + | {es_version}/index-modules-similarity.html[Elasticsearch Documentation] +| Score | <<painless-score-context>> + | {es_version}/query-dsl-function-score-query.html[Elasticsearch Documentation] +| Field | <<painless-field-context>> + | {es_version}/search-request-script-fields.html[Elasticsearch Documentation] +| Filter | <<painless-filter-context>> + | {es_version}/query-dsl-script-query.html[Elasticsearch Documentation] +| Minimum should match | <<painless-min-should-match-context>> + | {es_version}/query-dsl-terms-set-query.html[Elasticsearch Documentation] +| Metric aggregation initialization | <<painless-metric-agg-init-context>> + | {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] +| Metric aggregation map | <<painless-metric-agg-map-context>> + | {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] +| Metric aggregation combine | <<painless-metric-agg-combine-context>> + | {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] +| Metric aggregation reduce | <<painless-metric-agg-reduce-context>> + | {es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] +| Bucket aggregation | <<painless-bucket-agg-context>> + | {es_version}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation] +| Ingest processor | <<painless-ingest-processor-context>> + | {es_version}/script-processor.html[Elasticsearch Documentation] +| Watcher condition | <<painless-watcher-condition-context>> + | {xp_version}/condition-script.html[Elasticsearch Documentation] +| Watcher transform | <<painless-watcher-transform-context>> + | {xp_version}/transform-script.html[Elasticsearch Documentation] +|==== + +include::painless-contexts/index.asciidoc[] diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc new file mode 100644 index 0000000000000..64e4326e052f2 --- /dev/null +++ b/docs/painless/painless-contexts/index.asciidoc @@ -0,0 +1,35 @@ +include::painless-update-context.asciidoc[] + +include::painless-update-by-query-context.asciidoc[] + +include::painless-reindex-context.asciidoc[] + +include::painless-sort-context.asciidoc[] + +include::painless-similarity-context.asciidoc[] + +include::painless-weight-context.asciidoc[] + +include::painless-score-context.asciidoc[] +
+include::painless-field-context.asciidoc[] + +include::painless-filter-context.asciidoc[] + +include::painless-min-should-match-context.asciidoc[] + +include::painless-metric-agg-init-context.asciidoc[] + +include::painless-metric-agg-map-context.asciidoc[] + +include::painless-metric-agg-combine-context.asciidoc[] + +include::painless-metric-agg-reduce-context.asciidoc[] + +include::painless-bucket-agg-context.asciidoc[] + +include::painless-ingest-processor-context.asciidoc[] + +include::painless-watcher-condition-context.asciidoc[] + +include::painless-watcher-transform-context.asciidoc[] diff --git a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc new file mode 100644 index 0000000000000..b277055d87d8b --- /dev/null +++ b/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc @@ -0,0 +1,21 @@ +[[painless-bucket-agg-context]] +=== Bucket aggregation context + +Use a Painless script in a +{es_version}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation] +to calculate a value as a result in a bucket. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. The parameters + include values defined as part of the `buckets_path`. + +*Return* + +numeric:: + The calculated value as the result. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-field-context.asciidoc b/docs/painless/painless-contexts/painless-field-context.asciidoc new file mode 100644 index 0000000000000..bf44703001bc0 --- /dev/null +++ b/docs/painless/painless-contexts/painless-field-context.asciidoc @@ -0,0 +1,31 @@ +[[painless-field-context]] +=== Field context + +Use a Painless script to create a +{es_version}/search-request-script-fields.html[script field] to return +a customized value for each document in the results of a query. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`doc` (`Map`, read-only):: + Contains the fields of the specified document where each field is a + `List` of values. + +{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: + Contains extracted JSON in a `Map` and `List` structure for the fields + existing in a stored document. + +`_score` (`double`, read-only):: + The original score of the specified document. + +*Return* + +`Object`:: + The customized value for each document. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-filter-context.asciidoc b/docs/painless/painless-contexts/painless-filter-context.asciidoc new file mode 100644 index 0000000000000..ea0393893c882 --- /dev/null +++ b/docs/painless/painless-contexts/painless-filter-context.asciidoc @@ -0,0 +1,26 @@ +[[painless-filter-context]] +=== Filter context + +Use a Painless script as a {es_version}/query-dsl-script-query.html[filter] in a +query to include and exclude documents. + + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`doc` (`Map`, read-only):: + Contains the fields of the current document where each field is a + `List` of values. + +*Return* + +`boolean`:: + Return `true` if the current document should be returned as a result of + the query, and `false` otherwise. + + +*API* + +The standard <<painless-api-reference, Painless API>> is available.
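For orientation, a script query is the usual entry point into the filter context. A minimal sketch, assuming a numeric field named `cost` exists in the mapping and `limit` is a caller-supplied parameter:

["source","java"]
--------------------------------------------------
import java.util.Collections;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.ScriptQueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

// The script runs once per document in the filter context and must
// return a boolean; documents returning false are excluded.
ScriptQueryBuilder filter = QueryBuilders.scriptQuery(new Script(
        ScriptType.INLINE,
        "painless",
        "doc['cost'].value < params.limit",
        Collections.singletonMap("limit", 100)));
--------------------------------------------------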
\ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc new file mode 100644 index 0000000000000..ba3be0739631f --- /dev/null +++ b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc @@ -0,0 +1,41 @@ +[[painless-ingest-processor-context]] +=== Ingest processor context + +Use a Painless script in an {es_version}/script-processor.html[ingest processor] +to modify documents upon insertion. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`):: + The name of the index. + +{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`):: + The type of document within an index. + +`ctx` (`Map`):: + Contains extracted JSON in a `Map` and `List` structure for the fields + that are part of the document. + +*Side Effects* + +{es_version}/mapping-index-field.html[`ctx['_index']`]:: + Modify this to change the destination index for the current document. + +{es_version}/mapping-type-field.html[`ctx['_type']`]:: + Modify this to change the type for the current document. + +`ctx` (`Map`):: + Modify the values in the `Map/List` structure to add, modify, or delete + the fields of a document. + +*Return* + +void:: + No expected return value. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc new file mode 100644 index 0000000000000..1fec63ef4466f --- /dev/null +++ b/docs/painless/painless-contexts/painless-metric-agg-combine-context.asciidoc @@ -0,0 +1,27 @@ +[[painless-metric-agg-combine-context]] +=== Metric aggregation combine context + +Use a Painless script to +{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[combine] +values for use in a scripted metric aggregation. A combine script is run once +per shard following a <<painless-metric-agg-map-context, map script>> and is +optional as part of a full metric aggregation. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`params['_agg']` (`Map`):: + `Map` with values available from the prior map script. + +*Return* + +`List`, `Map`, `String`, or primitive:: + A value collected for use in a + <<painless-metric-agg-reduce-context, reduce script>>. If no reduce + script is specified, the value is used as part of the result. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc new file mode 100644 index 0000000000000..ed7e01ddd003a --- /dev/null +++ b/docs/painless/painless-contexts/painless-metric-agg-init-context.asciidoc @@ -0,0 +1,32 @@ +[[painless-metric-agg-init-context]] +=== Metric aggregation initialization context + +Use a Painless script to +{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[initialize] +values for use in a scripted metric aggregation. An initialization script is +run prior to document collection once per shard and is optional as part of the +full metric aggregation. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`params['_agg']` (`Map`):: + Empty `Map` used to add values for use in a + <<painless-metric-agg-map-context, map script>>.
+ +*Side Effects* + +`params['_agg']` (`Map`):: + Add values to this `Map` for use in a map script. Additional values must + be of the type `Map`, `List`, `String`, or primitive. + +*Return* + +`void`:: + No expected return value. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc new file mode 100644 index 0000000000000..51f06e010db35 --- /dev/null +++ b/docs/painless/painless-contexts/painless-metric-agg-map-context.asciidoc @@ -0,0 +1,47 @@ +[[painless-metric-agg-map-context]] +=== Metric aggregation map context + +Use a Painless script to +{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[map] +values for use in a scripted metric aggregation. A map script is run once per +collected document following an optional +<<painless-metric-agg-init-context, initialization script>> and is required as +part of a full metric aggregation. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`params['_agg']` (`Map`):: + `Map` used to add values for processing in a + <<painless-metric-agg-combine-context, combine script>> or returned + directly. + +`doc` (`Map`, read-only):: + Contains the fields of the current document where each field is a + `List` of values. + +`_score` (`double`, read-only):: + The similarity score of the current document. + +*Side Effects* + +`params['_agg']` (`Map`):: + Use this `Map` to add values for processing in a combine script. + Additional values must be of the type `Map`, `List`, `String`, or + primitive. If an initialization script is provided as part of the + aggregation then values added from the initialization script are + available as well. If no combine script is specified, values must be + directly stored in `_agg`. If no combine script and no + <<painless-metric-agg-reduce-context, reduce script>> are specified, the + values are used as the result. + +*Return* + +`void`:: + No expected return value. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. diff --git a/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc new file mode 100644 index 0000000000000..1b64b85392d26 --- /dev/null +++ b/docs/painless/painless-contexts/painless-metric-agg-reduce-context.asciidoc @@ -0,0 +1,28 @@ +[[painless-metric-agg-reduce-context]] +=== Metric aggregation reduce context + +Use a Painless script to +{es_version}/search-aggregations-metrics-scripted-metric-aggregation.html[reduce] +values to produce the result of a scripted metric aggregation. A reduce script +is run once on the coordinating node following a +<<painless-metric-agg-combine-context, combine script>> (or a +<<painless-metric-agg-map-context, map script>> if no combine script is +specified) and is optional as part of a full metric aggregation. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`params['_aggs']` (`Map`):: + `Map` with values available from the prior combine script (or a map + script if no combine script is specified). + +*Return* + +`List`, `Map`, `String`, or primitive:: + A value used as the result. + +*API* + +The standard <<painless-api-reference, Painless API>> is available.
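The four contexts covered here (initialization, map, combine, reduce) are wired together by a single aggregation builder. A minimal sketch from Java, assuming a numeric `amount` field; the script bodies are illustrative only:

["source","java"]
--------------------------------------------------
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;

// Each phase runs in its own context: init once per shard, map once per
// document, combine once per shard, reduce once on the coordinating node.
ScriptedMetricAggregationBuilder agg = AggregationBuilders.scriptedMetric("total_amount")
        .initScript(new Script("params._agg.sum = 0"))
        .mapScript(new Script("params._agg.sum += doc['amount'].value"))
        .combineScript(new Script("return params._agg.sum"))
        .reduceScript(new Script(
                "double total = 0; for (def s : params._aggs) { total += s } return total"));
--------------------------------------------------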
diff --git a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc new file mode 100644 index 0000000000000..c310f42928eb4 --- /dev/null +++ b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc @@ -0,0 +1,28 @@ +[[painless-min-should-match-context]] +=== Minimum should match context + +Use a Painless script to specify the +{es_version}/query-dsl-terms-set-query.html[minimum] number of terms that a +specified field needs to match for a document to be part of the query +results. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`params['num_terms']` (`int`, read-only):: + The number of terms specified to match. + +`doc` (`Map`, read-only):: + Contains the fields of the current document where each field is a + `List` of values. + +*Return* + +`int`:: + The minimum number of terms required to match the current document. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-reindex-context.asciidoc b/docs/painless/painless-contexts/painless-reindex-context.asciidoc new file mode 100644 index 0000000000000..a8477c8c61996 --- /dev/null +++ b/docs/painless/painless-contexts/painless-reindex-context.asciidoc @@ -0,0 +1,68 @@ +[[painless-reindex-context]] +=== Reindex context + +Use a Painless script in a {es_version}/docs-reindex.html[reindex] operation to +add, modify, or delete fields within each document in an original index as it +is reindexed into a target index. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`ctx['_op']` (`String`):: + The name of the operation. + +{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`):: + The value used to select a shard for document storage. + +{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`):: + The name of the index. + +{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`):: + The type of document within an index. + +{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`):: + The unique document id. + +`ctx['_version']` (`int`):: + The current version of the document. + +{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: + Contains extracted JSON in a `Map` and `List` structure for the fields + existing in a stored document. + +*Side Effects* + +`ctx['_op']`:: + Use the default of `index` to update a document. Set to `none` to + specify no operation or `delete` to delete the current document from + the index. + +{es_version}/mapping-routing-field.html[`ctx['_routing']`]:: + Modify this to change the routing value for the current document. + +{es_version}/mapping-index-field.html[`ctx['_index']`]:: + Modify this to change the destination index for the current document. + +{es_version}/mapping-type-field.html[`ctx['_type']`]:: + Modify this to change the type for the current document. + +{es_version}/mapping-id-field.html[`ctx['_id']`]:: + Modify this to change the id for the current document. + +`ctx['_version']` (`int`):: + Modify this to change the version for the current document. + +{es_version}/mapping-source-field.html[`ctx['_source']`]:: + Modify the values in the `Map/List` structure to add, modify, or delete + the fields of a document. + +*Return* + +`void`:: + No expected return value. + +*API* + +The standard <<painless-api-reference, Painless API>> is available.
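A hedged sketch of entering the reindex context over HTTP with the low-level REST client (the same `Request` class used by the request converter tests earlier in this series); the index names and the `tag` field are illustrative only:

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();

// The script below runs in the reindex context for every copied document.
Request request = new Request("POST", "/_reindex");
request.setJsonEntity(
    "{\n" +
    "  \"source\": { \"index\": \"source_index\" },\n" +
    "  \"dest\": { \"index\": \"dest_index\" },\n" +
    "  \"script\": { \"lang\": \"painless\", \"source\": \"ctx._source.tag = 'copied'\" }\n" +
    "}");
Response response = restClient.performRequest(request);
--------------------------------------------------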
\ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-score-context.asciidoc b/docs/painless/painless-contexts/painless-score-context.asciidoc new file mode 100644 index 0000000000000..21667fd31f3b1 --- /dev/null +++ b/docs/painless/painless-contexts/painless-score-context.asciidoc @@ -0,0 +1,27 @@ +[[painless-score-context]] +=== Score context + +Use a Painless script in a +{es_version}/query-dsl-function-score-query.html[function score] to apply a new +score to documents returned from a query. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`doc` (`Map`, read-only):: + Contains the fields of the current document where each field is a + `List` of values. + +`_score` (`double`, read-only):: + The similarity score of the current document. + +*Return* + +`double`:: + The score for the current document. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-similarity-context.asciidoc b/docs/painless/painless-contexts/painless-similarity-context.asciidoc new file mode 100644 index 0000000000000..052844c3111a7 --- /dev/null +++ b/docs/painless/painless-contexts/painless-similarity-context.asciidoc @@ -0,0 +1,53 @@ +[[painless-similarity-context]] +=== Similarity context + +Use a Painless script to create a +{es_version}/index-modules-similarity.html[similarity] equation for scoring +documents in a query. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in at query-time. + +`query.boost` (`float`, read-only):: + The boost value if provided by the query. If this is not provided the + value is `1.0f`. + +`field.docCount` (`long`, read-only):: + The number of documents that have a value for the current field. + +`field.sumDocFreq` (`long`, read-only):: + The sum of all terms that exist for the current field. If this is not + available the value is `-1`. + +`field.sumTotalTermFreq` (`long`, read-only):: + The sum of occurrences in the index for all the terms that exist in the + current field. If this is not available the value is `-1`. + +`term.docFreq` (`long`, read-only):: + The number of documents that contain the current term in the index. + +`term.totalTermFreq` (`long`, read-only):: + The total occurrences of the current term in the index. + +`doc.length` (`long`, read-only):: + The number of tokens the current document has in the current field. + +`doc.freq` (`long`, read-only):: + The number of occurrences of the current term in the current + document for the current field. + +*Return* + +`double`:: + The similarity score for the current document. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-sort-context.asciidoc b/docs/painless/painless-contexts/painless-sort-context.asciidoc new file mode 100644 index 0000000000000..7f510fb6a9251 --- /dev/null +++ b/docs/painless/painless-contexts/painless-sort-context.asciidoc @@ -0,0 +1,26 @@ +[[painless-sort-context]] +=== Sort context + +Use a Painless script to +{es_version}/search-request-sort.html[sort] the documents in a query. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`doc` (`Map`, read-only):: + Contains the fields of the current document where each field is a + `List` of values.
+ +`_score` (`double`, read-only):: + The similarity score of the current document. + +*Return* + +`double`:: + The score for the current document. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc new file mode 100644 index 0000000000000..65666e15844bf --- /dev/null +++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc @@ -0,0 +1,54 @@ +[[painless-update-by-query-context]] +=== Update by query context + +Use a Painless script in an +{es_version}/docs-update-by-query.html[update by query] operation to add, +modify, or delete fields within each of a set of documents collected as the +result of a query. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`ctx['_op']` (`String`):: + The name of the operation. + +{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only):: + The value used to select a shard for document storage. + +{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only):: + The name of the index. + +{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only):: + The type of document within an index. + +{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: + The unique document id. + +`ctx['_version']` (`int`, read-only):: + The current version of the document. + +{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: + Contains extracted JSON in a `Map` and `List` structure for the fields + existing in a stored document. + +*Side Effects* + +`ctx['_op']`:: + Use the default of `index` to update a document. Set to `none` to + specify no operation or `delete` to delete the current document from + the index. + +{es_version}/mapping-source-field.html[`ctx['_source']`]:: + Modify the values in the `Map/List` structure to add, modify, or delete + the fields of a document. + +*Return* + +`void`:: + No expected return value. + +*API* + +The standard <<painless-api-reference, Painless API>> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc new file mode 100644 index 0000000000000..b04ba8d9ffb56 --- /dev/null +++ b/docs/painless/painless-contexts/painless-update-context.asciidoc @@ -0,0 +1,55 @@ +[[painless-update-context]] +=== Update context + +Use a Painless script in an {es_version}/docs-update.html[update] operation to +add, modify, or delete fields within a single document. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`ctx['_op']` (`String`):: + The name of the operation. + +{es_version}/mapping-routing-field.html[`ctx['_routing']`] (`String`, read-only):: + The value used to select a shard for document storage. + +{es_version}/mapping-index-field.html[`ctx['_index']`] (`String`, read-only):: + The name of the index. + +{es_version}/mapping-type-field.html[`ctx['_type']`] (`String`, read-only):: + The type of document within an index. + +{es_version}/mapping-id-field.html[`ctx['_id']`] (`int`, read-only):: + The unique document id. + +`ctx['_version']` (`int`, read-only):: + The current version of the document. + +`ctx['_now']` (`long`, read-only):: + The current timestamp in milliseconds.
+ +{es_version}/mapping-source-field.html[`ctx['_source']`] (`Map`):: + Contains extracted JSON in a `Map` and `List` structure for the fields + existing in a stored document. + +*Side Effects* + +`ctx['_op']`:: + Use the default of `index` to update a document. Set to `none` to + specify no operation or `delete` to delete the current document from + the index. + +{es_version}/mapping-source-field.html[`ctx['_source']`]:: + Modify the values in the `Map/List` structure to add, modify, or delete + the fields of a document. + +*Return* + +`void`:: + No expected return value. + +*API* + +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc new file mode 100644 index 0000000000000..3a5e460a55de7 --- /dev/null +++ b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc @@ -0,0 +1,38 @@ +[[painless-watcher-condition-context]] +=== Watcher condition context + +Use a Painless script as a {xp_version}/condition-script.html[watcher condition] +to test if a response is necessary. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`ctx['watch_id']` (`String`, read-only):: + The id of the watch. + +`ctx['execution_time']` (`DateTime`, read-only):: + The start time for the watch. + +`ctx['trigger']['scheduled_time']` (`DateTime`, read-only):: + The scheduled trigger time for the watch. + +`ctx['trigger']['triggered_time']` (`DateTime`, read-only):: + The actual trigger time for the watch. + +`ctx['metadata']` (`Map`, read-only):: + Any metadata associated with the watch. + +`ctx['payload']` (`Map`, read-only):: + The accessible watch data based upon the + {xp_version}/input.html[watch input]. + +*Return* + +`boolean`:: + Expects `true` if the condition is met, and `false` otherwise. + +*API* + +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc new file mode 100644 index 0000000000000..1831da5a9f87b --- /dev/null +++ b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc @@ -0,0 +1,39 @@ +[[painless-watcher-transform-context]] +=== Watcher transform context + +Use a Painless script to {xp_version}/transform-script.html[transform] watch +data into a new payload for use in a response to a condition. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`ctx['watch_id']` (`String`, read-only):: + The id of the watch. + +`ctx['execution_time']` (`DateTime`, read-only):: + The start time for the watch. + +`ctx['trigger']['scheduled_time']` (`DateTime`, read-only):: + The scheduled trigger time for the watch. + +`ctx['trigger']['triggered_time']` (`DateTime`, read-only):: + The actual trigger time for the watch. + +`ctx['metadata']` (`Map`, read-only):: + Any metadata associated with the watch. + +`ctx['payload']` (`Map`, read-only):: + The accessible watch data based upon the + {xp_version}/input.html[watch input]. + + +*Return* + +`Object`:: + The new payload. + +*API* + +The standard <> is available. 
\ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-weight-context.asciidoc b/docs/painless/painless-contexts/painless-weight-context.asciidoc new file mode 100644 index 0000000000000..0aef936183c0f --- /dev/null +++ b/docs/painless/painless-contexts/painless-weight-context.asciidoc @@ -0,0 +1,42 @@ +[[painless-weight-context]] +=== Weight context + +Use a Painless script to create a +{es_version}/index-modules-similarity.html[weight] for use in a +<>. Weight is used to prevent +recalculation of constants that remain the same across documents. + +*Variables* + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. + +`query.boost` (`float`, read-only):: + The boost value if provided by the query. If this is not provided the + value is `1.0f`. + +`field.docCount` (`long`, read-only):: + The number of documents that have a value for the current field. + +`field.sumDocFreq` (`long`, read-only):: + The sum of all terms that exist for the current field. If this is not + available the value is `-1`. + +`field.sumTotalTermFreq` (`long`, read-only):: + The sum of occurrences in the index for all the terms that exist in the + current field. If this is not available the value is `-1`. + +`term.docFreq` (`long`, read-only):: + The number of documents that contain the current term in the index. + +`term.totalTermFreq` (`long`, read-only):: + The total occurrences of the current term in the index. + +*Return* + +`double`:: + A scoring factor used across all documents. + +*API* + +The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-operators-array.asciidoc index e91c07acef5c0..acfb87d30af1b 100644 --- a/docs/painless/painless-operators-array.asciidoc +++ b/docs/painless/painless-operators-array.asciidoc @@ -184,7 +184,7 @@ brace_access: '[' expression ']' store `def` to `x` <5> declare `def y`; implicit cast `int 1` to `def` -> `def`; - store `def ` to `y`; + store `def` to `y`; <6> declare `int i`; load from `d` -> `def` implicit cast `def` to `1-d int array reference` diff --git a/docs/painless/painless-scripts.asciidoc b/docs/painless/painless-scripts.asciidoc index 87e5b60159060..81fdbbe7367db 100644 --- a/docs/painless/painless-scripts.asciidoc +++ b/docs/painless/painless-scripts.asciidoc @@ -3,4 +3,4 @@ Scripts are composed of one-to-many <> and are run in a sandbox that determines what local variables are immediately available -along with what APIs are whitelisted for use. \ No newline at end of file +along with what APIs are whitelisted for use. From 2bb4f3837113952488a9c71773107b024b756d14 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 3 Jul 2018 09:13:50 +0200 Subject: [PATCH 11/36] Add write*Blob option to replace existing blob (#31729) Adds a new parameter to the BlobContainer#write*Blob methods to specify whether the existing file should be overwritten or not. For some metadata files in the repository, we actually want to replace the current file. This is currently implemented through an explicit blob delete and then a fresh write. When using a cloud provider (S3, GCS, Azure), this results in 2 API requests instead of just 1. This change will therefore allow us to achieve the same functionality using fewer API requests.
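
As a rough sketch of the new contract from the caller's side (the class, method, and blob names below are illustrative, not part of this change; only the BlobContainer signatures come from the patch), the flag separates replacing writes from create-only writes:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    import org.elasticsearch.common.blobstore.BlobContainer;

    // Hypothetical caller demonstrating the failIfAlreadyExists parameter.
    public class BlobWriteSketch {

        // Replace a mutable metadata blob (e.g. the index.latest generation pointer)
        // in a single API call, instead of the previous delete-then-write sequence.
        static void replaceBlob(BlobContainer container, String name, byte[] payload) throws IOException {
            try (InputStream stream = new ByteArrayInputStream(payload)) {
                container.writeBlobAtomic(name, stream, payload.length, false);
            }
        }

        // Keep the old create-only behaviour: a second write of the same name
        // throws java.nio.file.FileAlreadyExistsException.
        static void createBlob(BlobContainer container, String name, byte[] payload) throws IOException {
            try (InputStream stream = new ByteArrayInputStream(payload)) {
                container.writeBlob(name, stream, payload.length, true);
            }
        }
    }

Note that, as documented in the S3 changes below, the S3 implementation ignores the flag, so create-only semantics are best-effort on that repository type.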
--- .../blobstore/url/URLBlobContainer.java | 2 +- .../azure/AzureStorageFixture.java | 11 ++++-- .../azure/AzureBlobContainer.java | 4 +-- .../repositories/azure/AzureBlobStore.java | 6 ++-- .../azure/AzureStorageService.java | 9 +++-- .../azure/AzureStorageServiceMock.java | 5 +-- .../gcs/GoogleCloudStorageFixture.java | 19 +++++----- .../gcs/GoogleCloudStorageBlobContainer.java | 4 +-- .../gcs/GoogleCloudStorageBlobStore.java | 35 +++++++++++-------- .../repositories/hdfs/HdfsBlobContainer.java | 5 +-- .../hdfs/HdfsBlobStoreContainerTests.java | 2 +- .../repositories/s3/S3BlobContainer.java | 5 ++- .../common/blobstore/BlobContainer.java | 17 +++++---- .../common/blobstore/fs/FsBlobContainer.java | 19 +++++++--- .../blobstore/BlobStoreRepository.java | 18 +++++----- .../blobstore/ChecksumBlobStoreFormat.java | 4 +-- .../snapshots/BlobStoreFormatIT.java | 6 ++-- .../mockstore/BlobContainerWrapper.java | 9 ++--- .../snapshots/mockstore/MockRepository.java | 15 ++++---- .../ESBlobStoreContainerTestCase.java | 22 +++++++----- .../repositories/ESBlobStoreTestCase.java | 2 +- 21 files changed, 131 insertions(+), 88 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java index fb20b73b61c00..7b72871f4f78d 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java @@ -108,7 +108,7 @@ public InputStream readBlob(String name) throws IOException { } @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { throw new UnsupportedOperationException("URL repository doesn't support this operation"); } diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java index f906b9fa9a913..0bd9503f43dac 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java +++ b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java @@ -122,15 +122,20 @@ private static PathTrie defaultHandlers(final Map { final String destContainerName = request.getParam("container"); final String destBlobName = objectName(request.getParameters()); + final String ifNoneMatch = request.getHeader("If-None-Match"); final Container destContainer = containers.get(destContainerName); if (destContainer == null) { return newContainerNotFoundError(request.getId()); } - byte[] existingBytes = destContainer.objects.putIfAbsent(destBlobName, request.getBody()); - if (existingBytes != null) { - return newBlobAlreadyExistsError(request.getId()); + if ("*".equals(ifNoneMatch)) { + byte[] existingBytes = destContainer.objects.putIfAbsent(destBlobName, request.getBody()); + if (existingBytes != null) { + return newBlobAlreadyExistsError(request.getId()); + } + } else { + destContainer.objects.put(destBlobName, request.getBody()); } return new Response(RestStatus.CREATED.getStatus(), 
TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE); }) diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 751e00f06adbb..5d5330e8cb563 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -86,11 +86,11 @@ public InputStream readBlob(String blobName) throws IOException { } @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { logger.trace("writeBlob({}, stream, {})", buildKey(blobName), blobSize); try { - blobStore.writeBlob(buildKey(blobName), inputStream, blobSize); + blobStore.writeBlob(buildKey(blobName), inputStream, blobSize, failIfAlreadyExists); } catch (URISyntaxException|StorageException e) { throw new IOException("Can not write blob " + blobName, e); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index bcd6b936af1aa..f4bc362e53602 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -117,8 +117,8 @@ public Map listBlobsByPrefix(String keyPath, String prefix return service.listBlobsByPrefix(clientName, container, keyPath, prefix); } - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException, - FileAlreadyExistsException { - service.writeBlob(this.clientName, container, blobName, inputStream, blobSize); + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + throws URISyntaxException, StorageException, FileAlreadyExistsException { + service.writeBlob(this.clientName, container, blobName, inputStream, blobSize, failIfAlreadyExists); } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 73dd68f4b5f57..9482182b02d28 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -236,17 +236,20 @@ public Map listBlobsByPrefix(String account, String contai return blobsBuilder.immutableMap(); } - public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) + public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize, + boolean failIfAlreadyExists) throws URISyntaxException, StorageException, FileAlreadyExistsException { logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize)); final Tuple> client = client(account); final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName); try { 
+ final AccessCondition accessCondition = + failIfAlreadyExists ? AccessCondition.generateIfNotExistsCondition() : AccessCondition.generateEmptyCondition(); SocketAccess.doPrivilegedVoidException(() -> - blob.upload(inputStream, blobSize, AccessCondition.generateIfNotExistsCondition(), null, client.v2().get())); + blob.upload(inputStream, blobSize, accessCondition, null, client.v2().get())); } catch (final StorageException se) { - if (se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT && + if (failIfAlreadyExists && se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT && StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) { throw new FileAlreadyExistsException(blobName, null, se.getMessage()); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 264cb90378529..18eb529c0eebe 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -108,9 +108,10 @@ public Map listBlobsByPrefix(String account, String contai } @Override - public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) + public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize, + boolean failIfAlreadyExists) throws URISyntaxException, StorageException, FileAlreadyExistsException { - if (blobs.containsKey(blobName)) { + if (failIfAlreadyExists && blobs.containsKey(blobName)) { throw new FileAlreadyExistsException(blobName); } try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java index b1a185c9c08c9..b37b89b243ba7 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java @@ -158,10 +158,6 @@ private static PathTrie defaultHandlers(final Map { final String ifGenerationMatch = request.getParam("ifGenerationMatch"); - if ("0".equals(ifGenerationMatch) == false) { - return newError(RestStatus.PRECONDITION_FAILED, "object already exist"); - } - final String uploadType = request.getParam("uploadType"); if ("resumable".equals(uploadType)) { final String objectName = request.getParam("name"); @@ -172,12 +168,19 @@ private static PathTrie defaultHandlers(final Map LARGE_BLOB_THRESHOLD_BYTE_SIZE) { - writeBlobResumable(blobInfo, inputStream); + writeBlobResumable(blobInfo, inputStream, failIfAlreadyExists); } else { - writeBlobMultipart(blobInfo, inputStream, blobSize); + writeBlobMultipart(blobInfo, inputStream, blobSize, failIfAlreadyExists); } } @@ -210,14 +210,17 @@ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws I * Uploads a blob using the "resumable upload" method (multiple requests, which * can be independently retried in case of failure, see * https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload - * * 
@param blobInfo the info for the blob to be uploaded * @param inputStream the stream containing the blob data + * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists */ - private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException { + private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream, boolean failIfAlreadyExists) throws IOException { try { + final Storage.BlobWriteOption[] writeOptions = failIfAlreadyExists ? + new Storage.BlobWriteOption[] { Storage.BlobWriteOption.doesNotExist() } : + new Storage.BlobWriteOption[0]; final WriteChannel writeChannel = SocketAccess - .doPrivilegedIOException(() -> client().writer(blobInfo, Storage.BlobWriteOption.doesNotExist())); + .doPrivilegedIOException(() -> client().writer(blobInfo, writeOptions)); Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { @Override public boolean isOpen() { @@ -236,7 +239,7 @@ public int write(ByteBuffer src) throws IOException { } })); } catch (final StorageException se) { - if (se.getCode() == HTTP_PRECON_FAILED) { + if (failIfAlreadyExists && se.getCode() == HTTP_PRECON_FAILED) { throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); } throw se; @@ -248,20 +251,24 @@ public int write(ByteBuffer src) throws IOException { * 'multipart/related' request containing both data and metadata. The request is * gziped), see: * https://cloud.google.com/storage/docs/json_api/v1/how-tos/multipart-upload - * - * @param blobInfo the info for the blob to be uploaded + * @param blobInfo the info for the blob to be uploaded * @param inputStream the stream containing the blob data * @param blobSize the size + * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists */ - private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize) throws IOException { + private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + throws IOException { assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); Streams.copy(inputStream, baos); try { + final Storage.BlobTargetOption[] targetOptions = failIfAlreadyExists ? 
+ new Storage.BlobTargetOption[] { Storage.BlobTargetOption.doesNotExist() } : + new Storage.BlobTargetOption[0]; SocketAccess.doPrivilegedVoidIOException( - () -> client().create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist())); + () -> client().create(blobInfo, baos.toByteArray(), targetOptions)); } catch (final StorageException se) { - if (se.getCode() == HTTP_PRECON_FAILED) { + if (failIfAlreadyExists && se.getCode() == HTTP_PRECON_FAILED) { throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); } throw se; diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index 23557ae6cf84a..580d033354e58 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -91,11 +91,12 @@ public InputStream readBlob(String blobName) throws IOException { } @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { store.execute((Operation) fileContext -> { Path blob = new Path(path, blobName); // we pass CREATE, which means it fails if a blob already exists. - EnumSet flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK); + EnumSet flags = failIfAlreadyExists ? EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK) : + EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK); CreateOpts[] opts = {CreateOpts.bufferSize(bufferSize)}; try (FSDataOutputStream stream = fileContext.create(blob, flags, opts)) { int bytesRead; diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index a5d68331db78e..ba00862e93848 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -135,7 +135,7 @@ public void testReadOnly() throws Exception { assertTrue(util.exists(hdfsPath)); byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); - writeBlob(container, "foo", new BytesArray(data)); + writeBlob(container, "foo", new BytesArray(data), randomBoolean()); assertArrayEquals(readBlobFully(container, "foo", data.length), data); assertTrue(container.blobExists("foo")); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 86b01a3e79cdd..b7cc2b89605d3 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -90,8 +90,11 @@ public InputStream readBlob(String blobName) throws IOException { } } + /** + * This implementation ignores the failIfAlreadyExists flag as the S3 API has no way to enforce this due to its weak consistency model. 
+ */ @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { SocketAccess.doPrivilegedIOException(() -> { if (blobSize <= blobStore.bufferSizeInBytes()) { executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize); diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index db185f1e8c11c..2ecce44b55c1e 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -69,16 +69,18 @@ public interface BlobContainer { * @param blobSize * The size of the blob to be written, in bytes. It is implementation dependent whether * this value is used in writing the blob to the repository. - * @throws FileAlreadyExistsException if a blob by the same name already exists + * @param failIfAlreadyExists + * whether to throw a FileAlreadyExistsException if the given blob already exists + * @throws FileAlreadyExistsException if failIfAlreadyExists is true and a blob by the same name already exists * @throws IOException if the input stream could not be read, or the target blob could not be written to. */ - void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException; + void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException; /** * Reads blob content from the input stream and writes it to the container in a new blob with the given name, * using an atomic write operation if the implementation supports it. When the BlobContainer implementation * does not provide a specific implementation of writeBlobAtomic(String, InputStream, long), then - * the {@link #writeBlob(String, InputStream, long)} method is used. + * the {@link #writeBlob(String, InputStream, long, boolean)} method is used. * * This method assumes the container does not already contain a blob of the same blobName. If a blob by the * same name already exists, the operation will fail and an {@link IOException} will be thrown. @@ -90,11 +92,14 @@ public interface BlobContainer { * @param blobSize * The size of the blob to be written, in bytes. It is implementation dependent whether * this value is used in writing the blob to the repository. - * @throws FileAlreadyExistsException if a blob by the same name already exists + * @param failIfAlreadyExists + * whether to throw a FileAlreadyExistsException if the given blob already exists + * @throws FileAlreadyExistsException if failIfAlreadyExists is true and a blob by the same name already exists * @throws IOException if the input stream could not be read, or the target blob could not be written to. 
*/ - default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { - writeBlob(blobName, inputStream, blobSize); + default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists) + throws IOException { + writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); } /** diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index a58802ecd1828..bab984bd85c74 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -124,7 +124,10 @@ public InputStream readBlob(String name) throws IOException { } @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + if (failIfAlreadyExists == false) { + deleteBlobIgnoringIfNotExists(blobName); + } final Path file = path.resolve(blobName); try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) { Streams.copy(inputStream, outputStream); @@ -134,7 +137,8 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t } @Override - public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists) + throws IOException { final String tempBlob = tempBlobName(blobName); final Path tempBlobPath = path.resolve(tempBlob); try { @@ -142,7 +146,7 @@ public void writeBlobAtomic(final String blobName, final InputStream inputStream Streams.copy(inputStream, outputStream); } IOUtils.fsync(tempBlobPath, false); - moveBlobAtomic(tempBlob, blobName); + moveBlobAtomic(tempBlob, blobName, failIfAlreadyExists); } catch (IOException ex) { try { deleteBlobIgnoringIfNotExists(tempBlob); @@ -155,13 +159,18 @@ public void writeBlobAtomic(final String blobName, final InputStream inputStream } } - public void moveBlobAtomic(final String sourceBlobName, final String targetBlobName) throws IOException { + public void moveBlobAtomic(final String sourceBlobName, final String targetBlobName, final boolean failIfAlreadyExists) + throws IOException { final Path sourceBlobPath = path.resolve(sourceBlobName); final Path targetBlobPath = path.resolve(targetBlobName); // If the target file exists then Files.move() behaviour is implementation specific // the existing file might be replaced or this method fails by throwing an IOException. 
if (Files.exists(targetBlobPath)) { - throw new FileAlreadyExistsException("blob [" + targetBlobPath + "] already exists, cannot overwrite"); + if (failIfAlreadyExists) { + throw new FileAlreadyExistsException("blob [" + targetBlobPath + "] already exists, cannot overwrite"); + } else { + deleteBlobIgnoringIfNotExists(targetBlobName); + } } Files.move(sourceBlobPath, targetBlobPath, StandardCopyOption.ATOMIC_MOVE); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 0151e4e7322d5..86131fe468d28 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -556,7 +556,7 @@ public String startVerification() { String blobName = "master.dat"; BytesArray bytes = new BytesArray(testBytes); try (InputStream stream = bytes.streamInput()) { - testContainer.writeBlobAtomic(blobName, stream, bytes.length()); + testContainer.writeBlobAtomic(blobName, stream, bytes.length(), true); } return seed; } @@ -664,7 +664,7 @@ protected void writeIndexGen(final RepositoryData repositoryData, final long rep // write the index file final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen); logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob); - writeAtomic(indexBlob, snapshotsBytes); + writeAtomic(indexBlob, snapshotsBytes, true); // delete the N-2 index file if it exists, keep the previous one around as a backup if (isReadOnly() == false && newGen - 2 >= 0) { final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2); @@ -677,9 +677,8 @@ protected void writeIndexGen(final RepositoryData repositoryData, final long rep bStream.writeLong(newGen); genBytes = bStream.bytes(); } - snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(INDEX_LATEST_BLOB); logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen); - writeAtomic(INDEX_LATEST_BLOB, genBytes); + writeAtomic(INDEX_LATEST_BLOB, genBytes, false); } /** @@ -698,9 +697,8 @@ void writeIncompatibleSnapshots(RepositoryData repositoryData) throws IOExceptio } bytes = bStream.bytes(); } - snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(INCOMPATIBLE_SNAPSHOTS_BLOB); // write the incompatible snapshots blob - writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes); + writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes, false); } /** @@ -766,9 +764,9 @@ private long listBlobsToGetLatestIndexId() throws IOException { return latest; } - private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException { + private void writeAtomic(final String blobName, final BytesReference bytesRef, boolean failIfAlreadyExists) throws IOException { try (InputStream stream = bytesRef.streamInput()) { - snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length()); + snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length(), failIfAlreadyExists); } } @@ -813,7 +811,7 @@ public void verify(String seed, DiscoveryNode localNode) { try { BytesArray bytes = new BytesArray(seed); try (InputStream stream = bytes.streamInput()) { - testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length()); + testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length(), true); } } catch (IOException exp) { 
throw new RepositoryVerificationException(metadata.name(), "store location [" + blobStore() + "] is not accessible on the node [" + localNode + "]", exp); @@ -1252,7 +1250,7 @@ private void snapshotFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo) t snapshotRateLimitingTimeInNanos::inc); } inputStream = new AbortableInputStream(inputStream, fileInfo.physicalName()); - blobContainer.writeBlob(fileInfo.partName(i), inputStream, partBytes); + blobContainer.writeBlob(fileInfo.partName(i), inputStream, partBytes, true); } Store.verify(indexInput); snapshotStatus.addProcessedFile(fileInfo.length()); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index b974be2b869ab..ca6ec74dc2ce2 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -132,7 +132,7 @@ public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws final String blobName = blobName(name); writeTo(obj, blobName, bytesArray -> { try (InputStream stream = bytesArray.streamInput()) { - blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length()); + blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length(), true); } }); } @@ -150,7 +150,7 @@ public void write(T obj, BlobContainer blobContainer, String name) throws IOExce final String blobName = blobName(name); writeTo(obj, blobName, bytesArray -> { try (InputStream stream = bytesArray.streamInput()) { - blobContainer.writeBlob(blobName, stream, bytesArray.length()); + blobContainer.writeBlob(blobName, stream, bytesArray.length(), true); } }); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index 70be72989cf95..6f4f69ad67e88 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -224,7 +224,8 @@ public void testAtomicWriteFailures() throws Exception { IOException writeBlobException = expectThrows(IOException.class, () -> { BlobContainer wrapper = new BlobContainerWrapper(blobContainer) { @Override - public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize) throws IOException { + public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + throws IOException { throw new IOException("Exception thrown in writeBlobAtomic() for " + blobName); } }; @@ -251,10 +252,9 @@ protected void randomCorruption(BlobContainer blobContainer, String blobName) th int location = randomIntBetween(0, buffer.length - 1); buffer[location] = (byte) (buffer[location] ^ 42); } while (originalChecksum == checksum(buffer)); - blobContainer.deleteBlob(blobName); // delete original before writing new blob BytesArray bytesArray = new BytesArray(buffer); try (StreamInput stream = bytesArray.streamInput()) { - blobContainer.writeBlob(blobName, stream, bytesArray.length()); + blobContainer.writeBlob(blobName, stream, bytesArray.length(), false); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index b5c6339724123..5666869a1aa0b 100644 --- 
a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -49,13 +49,14 @@ public InputStream readBlob(String name) throws IOException { } @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - delegate.writeBlob(blobName, inputStream, blobSize); + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + delegate.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); } @Override - public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { - delegate.writeBlobAtomic(blobName, inputStream, blobSize); + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, + boolean failIfAlreadyExists) throws IOException { + delegate.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index d0702acf10373..d05a10905d858 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -346,9 +346,9 @@ public Map listBlobsByPrefix(String blobNamePrefix) throws } @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { maybeIOExceptionOrBlock(blobName); - super.writeBlob(blobName, inputStream, blobSize); + super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); if (RandomizedContext.current().getRandom().nextBoolean()) { // for network based repositories, the blob may have been written but we may still // get an error with the client connection, so an IOException here simulates this @@ -357,27 +357,28 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t } @Override - public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, + final boolean failIfAlreadyExists) throws IOException { final Random random = RandomizedContext.current().getRandom(); if (allowAtomicOperations && random.nextBoolean()) { if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) { // Simulate a failure between the write and move operation in FsBlobContainer final String tempBlobName = FsBlobContainer.tempBlobName(blobName); - super.writeBlob(tempBlobName, inputStream, blobSize); + super.writeBlob(tempBlobName, inputStream, blobSize, failIfAlreadyExists); maybeIOExceptionOrBlock(blobName); final FsBlobContainer fsBlobContainer = (FsBlobContainer) delegate(); - fsBlobContainer.moveBlobAtomic(tempBlobName, blobName); + fsBlobContainer.moveBlobAtomic(tempBlobName, blobName, failIfAlreadyExists); } else { // Atomic write since it is potentially supported // by the delegating blob container maybeIOExceptionOrBlock(blobName); - super.writeBlobAtomic(blobName, inputStream, blobSize); + super.writeBlobAtomic(blobName, inputStream, blobSize, 
failIfAlreadyExists); } } else { // Simulate a non-atomic write since many blob container // implementations does not support atomic write maybeIOExceptionOrBlock(blobName); - super.writeBlob(blobName, inputStream, blobSize); + super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java index 43a62bbe662cc..9f12c36999145 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java @@ -61,7 +61,12 @@ public void testWriteRead() throws IOException { try(BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(new BlobPath()); byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); - writeBlob(container, "foobar", new BytesArray(data)); + writeBlob(container, "foobar", new BytesArray(data), randomBoolean()); + if (randomBoolean()) { + // override file, to check if we get latest contents + data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); + writeBlob(container, "foobar", new BytesArray(data), false); + } try (InputStream stream = container.readBlob("foobar")) { BytesRefBuilder target = new BytesRefBuilder(); while (target.length() < data.length) { @@ -123,7 +128,7 @@ public void testDeleteBlob() throws IOException { byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); final BytesArray bytesArray = new BytesArray(data); - writeBlob(container, blobName, bytesArray); + writeBlob(container, blobName, bytesArray, randomBoolean()); container.deleteBlob(blobName); // should not raise // blob deleted, so should raise again @@ -149,20 +154,21 @@ public void testVerifyOverwriteFails() throws IOException { final BlobContainer container = store.blobContainer(new BlobPath()); byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); final BytesArray bytesArray = new BytesArray(data); - writeBlob(container, blobName, bytesArray); + writeBlob(container, blobName, bytesArray, true); // should not be able to overwrite existing blob - expectThrows(FileAlreadyExistsException.class, () -> writeBlob(container, blobName, bytesArray)); + expectThrows(FileAlreadyExistsException.class, () -> writeBlob(container, blobName, bytesArray, true)); container.deleteBlob(blobName); - writeBlob(container, blobName, bytesArray); // after deleting the previous blob, we should be able to write to it again + writeBlob(container, blobName, bytesArray, true); // after deleting the previous blob, we should be able to write to it again } } - protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException { + protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray, + boolean failIfAlreadyExists) throws IOException { try (InputStream stream = bytesArray.streamInput()) { if (randomBoolean()) { - container.writeBlob(blobName, stream, bytesArray.length()); + container.writeBlob(blobName, stream, bytesArray.length(), failIfAlreadyExists); } else { - container.writeBlobAtomic(blobName, stream, bytesArray.length()); + container.writeBlobAtomic(blobName, stream, bytesArray.length(), failIfAlreadyExists); } } } diff 
--git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java index 35a17c2a8dd83..ccc38ae362991 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java @@ -80,7 +80,7 @@ public static byte[] randomBytes(int length) { protected static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException { try (InputStream stream = bytesArray.streamInput()) { - container.writeBlob(blobName, stream, bytesArray.length()); + container.writeBlob(blobName, stream, bytesArray.length(), true); } } From ee4dbc8dedaea2323696057ed44c7dc2cb9fc0d9 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 3 Jul 2018 09:16:26 +0200 Subject: [PATCH 12/36] Split CircuitBreaker-related tests (#31659) `MemoryCircuitBreakerTests` conflates two test aspects: It tests individual circuit breakers as well as the circuit breaker hierarchy. With this commit we split those two aspects into two test classes: * Tests for individual circuit breakers stay in the current class * Other tests are moved to `HierarchyCircuitBreakerServiceTests` --- .../breaker/MemoryCircuitBreakerTests.java | 197 +---------------- .../HierarchyCircuitBreakerServiceTests.java | 202 ++++++++++++++++++ 2 files changed, 213 insertions(+), 186 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java diff --git a/server/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/server/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java index 56182fb90febe..31a84423db97a 100644 --- a/server/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java +++ b/server/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java @@ -19,20 +19,12 @@ package org.elasticsearch.common.breaker; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.indices.breaker.BreakerSettings; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -50,21 +42,18 @@ public void testThreadedUpdatesToBreaker() throws Exception { final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue((BYTES_PER_THREAD * NUM_THREADS) - 1), 1.0, logger); for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(new Runnable() { - @Override - public void run() { - for (int j = 0; j < BYTES_PER_THREAD; j++) { - try { - breaker.addEstimateBytesAndMaybeBreak(1L, "test"); - } catch (CircuitBreakingException e) { - if (tripped.get()) { - assertThat("tripped too many times", true, equalTo(false)); - } else { - assertThat(tripped.compareAndSet(false, true), equalTo(true)); - } - } catch (Exception e) { - lastException.set(e); + 
threads[i] = new Thread(() -> { + for (int j = 0; j < BYTES_PER_THREAD; j++) { + try { + breaker.addEstimateBytesAndMaybeBreak(1L, "test"); + } catch (CircuitBreakingException e) { + if (tripped.get()) { + assertThat("tripped too many times", true, equalTo(false)); + } else { + assertThat(tripped.compareAndSet(false, true), equalTo(true)); } + } catch (Exception e) { + lastException.set(e); } } }); @@ -81,134 +70,6 @@ public void run() { assertThat("breaker was tripped at least once", breaker.getTrippedCount(), greaterThanOrEqualTo(1L)); } - public void testThreadedUpdatesToChildBreaker() throws Exception { - final int NUM_THREADS = scaledRandomIntBetween(3, 15); - final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500); - final Thread[] threads = new Thread[NUM_THREADS]; - final AtomicBoolean tripped = new AtomicBoolean(false); - final AtomicReference lastException = new AtomicReference<>(null); - - final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { - - @Override - public CircuitBreaker getBreaker(String name) { - return breakerRef.get(); - } - - @Override - public void checkParentLimit(String label) throws CircuitBreakingException { - // never trip - } - }; - final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, (BYTES_PER_THREAD * NUM_THREADS) - 1, 1.0); - final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(settings, logger, - (HierarchyCircuitBreakerService)service, CircuitBreaker.REQUEST); - breakerRef.set(breaker); - - for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(new Runnable() { - @Override - public void run() { - for (int j = 0; j < BYTES_PER_THREAD; j++) { - try { - breaker.addEstimateBytesAndMaybeBreak(1L, "test"); - } catch (CircuitBreakingException e) { - if (tripped.get()) { - assertThat("tripped too many times", true, equalTo(false)); - } else { - assertThat(tripped.compareAndSet(false, true), equalTo(true)); - } - } catch (Exception e) { - lastException.set(e); - } - } - } - }); - - threads[i].start(); - } - - for (Thread t : threads) { - t.join(); - } - - assertThat("no other exceptions were thrown", lastException.get(), equalTo(null)); - assertThat("breaker was tripped", tripped.get(), equalTo(true)); - assertThat("breaker was tripped at least once", breaker.getTrippedCount(), greaterThanOrEqualTo(1L)); - } - - public void testThreadedUpdatesToChildBreakerWithParentLimit() throws Exception { - final int NUM_THREADS = scaledRandomIntBetween(3, 15); - final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500); - final int parentLimit = (BYTES_PER_THREAD * NUM_THREADS) - 2; - final int childLimit = parentLimit + 10; - final Thread[] threads = new Thread[NUM_THREADS]; - final AtomicInteger tripped = new AtomicInteger(0); - final AtomicReference lastException = new AtomicReference<>(null); - - final AtomicInteger parentTripped = new AtomicInteger(0); - final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { - - @Override - public CircuitBreaker getBreaker(String name) { - return breakerRef.get(); - } - - @Override - public void checkParentLimit(String label) throws CircuitBreakingException { - // Parent will trip right before regular breaker 
would trip - if (getBreaker(CircuitBreaker.REQUEST).getUsed() > parentLimit) { - parentTripped.incrementAndGet(); - logger.info("--> parent tripped"); - throw new CircuitBreakingException("parent tripped"); - } - } - }; - final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, childLimit, 1.0); - final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(settings, logger, - (HierarchyCircuitBreakerService)service, CircuitBreaker.REQUEST); - breakerRef.set(breaker); - - for (int i = 0; i < NUM_THREADS; i++) { - threads[i] = new Thread(new Runnable() { - @Override - public void run() { - for (int j = 0; j < BYTES_PER_THREAD; j++) { - try { - breaker.addEstimateBytesAndMaybeBreak(1L, "test"); - } catch (CircuitBreakingException e) { - tripped.incrementAndGet(); - } catch (Exception e) { - lastException.set(e); - } - } - } - }); - } - - logger.info("--> NUM_THREADS: [{}], BYTES_PER_THREAD: [{}], TOTAL_BYTES: [{}], PARENT_LIMIT: [{}], CHILD_LIMIT: [{}]", - NUM_THREADS, BYTES_PER_THREAD, (BYTES_PER_THREAD * NUM_THREADS), parentLimit, childLimit); - - logger.info("--> starting threads..."); - for (Thread t : threads) { - t.start(); - } - - for (Thread t : threads) { - t.join(); - } - - logger.info("--> child breaker: used: {}, limit: {}", breaker.getUsed(), breaker.getLimit()); - logger.info("--> parent tripped: {}, total trip count: {} (expecting 1-2 for each)", parentTripped.get(), tripped.get()); - assertThat("no other exceptions were thrown", lastException.get(), equalTo(null)); - assertThat("breaker should be reset back to the parent limit after parent breaker trips", - breaker.getUsed(), greaterThanOrEqualTo((long)parentLimit - NUM_THREADS)); - assertThat("parent breaker was tripped at least once", parentTripped.get(), greaterThanOrEqualTo(1)); - assertThat("total breaker was tripped at least once", tripped.get(), greaterThanOrEqualTo(1)); - } - public void testConstantFactor() throws Exception { final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue(15), 1.6, logger); String field = "myfield"; @@ -243,40 +104,4 @@ public void testConstantFactor() throws Exception { assertThat(cbe.getMessage().contains("field [" + field + "]"), equalTo(true)); } } - - /** - * Test that a breaker correctly redistributes to a different breaker, in - * this case, the request breaker borrows space from the fielddata breaker - */ - public void testBorrowingSiblingBreakerMemory() throws Exception { - Settings clusterSettings = Settings.builder() - .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200mb") - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb") - .build(); - try (CircuitBreakerService service = new HierarchyCircuitBreakerService(clusterSettings, - new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { - CircuitBreaker requestCircuitBreaker = service.getBreaker(MemoryCircuitBreaker.REQUEST); - CircuitBreaker fieldDataCircuitBreaker = service.getBreaker(MemoryCircuitBreaker.FIELDDATA); - - assertEquals(new ByteSizeValue(200, ByteSizeUnit.MB).getBytes(), - service.stats().getStats(MemoryCircuitBreaker.PARENT).getLimit()); - assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getLimit()); - assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getLimit()); - - double 
fieldDataUsedBytes = fieldDataCircuitBreaker - .addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break"); - assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), fieldDataUsedBytes, 0.0); - double requestUsedBytes = requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), - "should not break"); - assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), requestUsedBytes, 0.0); - requestUsedBytes = requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), - "should not break"); - assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).getBytes(), requestUsedBytes, 0.0); - CircuitBreakingException exception = expectThrows(CircuitBreakingException.class, () -> requestCircuitBreaker - .addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should break")); - assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be")); - assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); - } - } } diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java new file mode 100644 index 0000000000000..a03739b2d9a94 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -0,0 +1,202 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices.breaker; + + +import org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.breaker.MemoryCircuitBreaker; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class HierarchyCircuitBreakerServiceTests extends ESTestCase { + public void testThreadedUpdatesToChildBreaker() throws Exception { + final int NUM_THREADS = scaledRandomIntBetween(3, 15); + final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500); + final Thread[] threads = new Thread[NUM_THREADS]; + final AtomicBoolean tripped = new AtomicBoolean(false); + final AtomicReference lastException = new AtomicReference<>(null); + + final AtomicReference breakerRef = new AtomicReference<>(null); + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { + + @Override + public CircuitBreaker getBreaker(String name) { + return breakerRef.get(); + } + + @Override + public void checkParentLimit(String label) throws CircuitBreakingException { + // never trip + } + }; + final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, (BYTES_PER_THREAD * NUM_THREADS) - 1, 1.0); + final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(settings, logger, + (HierarchyCircuitBreakerService)service, CircuitBreaker.REQUEST); + breakerRef.set(breaker); + + for (int i = 0; i < NUM_THREADS; i++) { + threads[i] = new Thread(() -> { + for (int j = 0; j < BYTES_PER_THREAD; j++) { + try { + breaker.addEstimateBytesAndMaybeBreak(1L, "test"); + } catch (CircuitBreakingException e) { + if (tripped.get()) { + assertThat("tripped too many times", true, equalTo(false)); + } else { + assertThat(tripped.compareAndSet(false, true), equalTo(true)); + } + } catch (Exception e) { + lastException.set(e); + } + } + }); + + threads[i].start(); + } + + for (Thread t : threads) { + t.join(); + } + + assertThat("no other exceptions were thrown", lastException.get(), equalTo(null)); + assertThat("breaker was tripped", tripped.get(), equalTo(true)); + assertThat("breaker was tripped at least once", breaker.getTrippedCount(), greaterThanOrEqualTo(1L)); + } + + public void testThreadedUpdatesToChildBreakerWithParentLimit() throws Exception { + final int NUM_THREADS = scaledRandomIntBetween(3, 15); + final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500); + final int parentLimit = (BYTES_PER_THREAD * NUM_THREADS) - 2; + final int childLimit = parentLimit + 10; + final Thread[] threads = new Thread[NUM_THREADS]; + final AtomicInteger tripped = new AtomicInteger(0); + final AtomicReference lastException = new AtomicReference<>(null); + + final AtomicInteger parentTripped = new AtomicInteger(0); + final AtomicReference breakerRef = new AtomicReference<>(null); + final CircuitBreakerService service = new 
HierarchyCircuitBreakerService(Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { + + @Override + public CircuitBreaker getBreaker(String name) { + return breakerRef.get(); + } + + @Override + public void checkParentLimit(String label) throws CircuitBreakingException { + // Parent will trip right before regular breaker would trip + if (getBreaker(CircuitBreaker.REQUEST).getUsed() > parentLimit) { + parentTripped.incrementAndGet(); + logger.info("--> parent tripped"); + throw new CircuitBreakingException("parent tripped"); + } + } + }; + final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, childLimit, 1.0); + final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(settings, logger, + (HierarchyCircuitBreakerService)service, CircuitBreaker.REQUEST); + breakerRef.set(breaker); + + for (int i = 0; i < NUM_THREADS; i++) { + threads[i] = new Thread(() -> { + for (int j = 0; j < BYTES_PER_THREAD; j++) { + try { + breaker.addEstimateBytesAndMaybeBreak(1L, "test"); + } catch (CircuitBreakingException e) { + tripped.incrementAndGet(); + } catch (Exception e) { + lastException.set(e); + } + } + }); + } + + logger.info("--> NUM_THREADS: [{}], BYTES_PER_THREAD: [{}], TOTAL_BYTES: [{}], PARENT_LIMIT: [{}], CHILD_LIMIT: [{}]", + NUM_THREADS, BYTES_PER_THREAD, (BYTES_PER_THREAD * NUM_THREADS), parentLimit, childLimit); + + logger.info("--> starting threads..."); + for (Thread t : threads) { + t.start(); + } + + for (Thread t : threads) { + t.join(); + } + + logger.info("--> child breaker: used: {}, limit: {}", breaker.getUsed(), breaker.getLimit()); + logger.info("--> parent tripped: {}, total trip count: {} (expecting 1-2 for each)", parentTripped.get(), tripped.get()); + assertThat("no other exceptions were thrown", lastException.get(), equalTo(null)); + assertThat("breaker should be reset back to the parent limit after parent breaker trips", + breaker.getUsed(), greaterThanOrEqualTo((long)parentLimit - NUM_THREADS)); + assertThat("parent breaker was tripped at least once", parentTripped.get(), greaterThanOrEqualTo(1)); + assertThat("total breaker was tripped at least once", tripped.get(), greaterThanOrEqualTo(1)); + } + + + /** + * Test that a breaker correctly redistributes to a different breaker, in + * this case, the request breaker borrows space from the fielddata breaker + */ + public void testBorrowingSiblingBreakerMemory() throws Exception { + Settings clusterSettings = Settings.builder() + .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "200mb") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "150mb") + .build(); + try (CircuitBreakerService service = new HierarchyCircuitBreakerService(clusterSettings, + new ClusterSettings(clusterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))) { + CircuitBreaker requestCircuitBreaker = service.getBreaker(MemoryCircuitBreaker.REQUEST); + CircuitBreaker fieldDataCircuitBreaker = service.getBreaker(MemoryCircuitBreaker.FIELDDATA); + + assertEquals(new ByteSizeValue(200, ByteSizeUnit.MB).getBytes(), + service.stats().getStats(MemoryCircuitBreaker.PARENT).getLimit()); + assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getLimit()); + assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getLimit()); + + double fieldDataUsedBytes = 
fieldDataCircuitBreaker + .addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break"); + assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), fieldDataUsedBytes, 0.0); + double requestUsedBytes = requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), + "should not break"); + assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), requestUsedBytes, 0.0); + requestUsedBytes = requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), + "should not break"); + assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).getBytes(), requestUsedBytes, 0.0); + CircuitBreakingException exception = expectThrows(CircuitBreakingException.class, () -> requestCircuitBreaker + .addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should break")); + assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be")); + assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); + } + } +} From 3d53daeb2f58c5b6986f80a238f8b28d90447f88 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 3 Jul 2018 09:17:16 +0200 Subject: [PATCH 13/36] Account for XContent overhead in in-flight breaker So far the in-flight request circuit breaker has only accounted for the on-the-wire representation of a request. However, we convert the raw request into XContent internally, which increases the overhead. Therefore, we increase the value of the corresponding setting `network.breaker.inflight_requests.overhead` from one to two. While this value is still rather conservative (we assume that the representation as structured objects has no overhead compared to the byte[]), it is closer to reality than the current value. Relates #31613 --- .../migration/migrate_7_0/indices.asciidoc | 6 ++++++ .../modules/indices/circuit_breaker.asciidoc | 6 ++++-- .../breaker/HierarchyCircuitBreakerService.java | 2 +- .../elasticsearch/rest/RestControllerTests.java | 16 ++++++++-------- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/docs/reference/migration/migrate_7_0/indices.asciidoc b/docs/reference/migration/migrate_7_0/indices.asciidoc index db0c0ede466d0..b03a6014d5bc5 100644 --- a/docs/reference/migration/migrate_7_0/indices.asciidoc +++ b/docs/reference/migration/migrate_7_0/indices.asciidoc @@ -64,3 +64,9 @@ The following previously deprecated url parameter have been removed: * `filter_cache` - use `query` instead * `request_cache` - use `request` instead * `field_data` - use `fielddata` instead + +==== `network.breaker.inflight_requests.overhead` increased to 2 + +Previously the in flight requests circuit breaker considered only the raw byte representation. +By bumping the value of `network.breaker.inflight_requests.overhead` from 1 to 2, this circuit +breaker now also considers the memory overhead of representing the request as a structured object. \ No newline at end of file diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index 03cdb307b9f1e..559137a821036 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -60,7 +60,9 @@ request) from exceeding a certain amount of memory.
The in flight requests circuit breaker allows Elasticsearch to limit the memory usage of all currently active incoming requests on transport or HTTP level from exceeding a certain amount of -memory on a node. The memory usage is based on the content length of the request itself. +memory on a node. The memory usage is based on the content length of the request itself. This +circuit breaker also considers that memory is needed not only for the raw request but +also for representing it as a structured object, which is reflected by the default overhead. `network.breaker.inflight_requests.limit`:: @@ -70,7 +72,7 @@ memory on a node. The memory usage is based on the content length of the request `network.breaker.inflight_requests.overhead`:: A constant that all in flight requests estimations are multiplied with to determine a - final estimation. Defaults to 1 + final estimation. Defaults to 2. [[accounting-circuit-breaker]] [float] diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 9ea8a3df29492..e3a914c730ec8 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -73,7 +73,7 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { public static final Setting IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.memorySizeSetting("network.breaker.inflight_requests.limit", "100%", Property.Dynamic, Property.NodeScope); public static final Setting IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING = - Setting.doubleSetting("network.breaker.inflight_requests.overhead", 1.0d, 0.0d, Property.Dynamic, Property.NodeScope); + Setting.doubleSetting("network.breaker.inflight_requests.overhead", 2.0d, 0.0d, Property.Dynamic, Property.NodeScope); public static final Setting IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("network.breaker.inflight_requests.type", "memory", CircuitBreaker.Type::parseValue, Property.NodeScope); diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index a090cc40b6857..348b85a8ba4a1 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -239,7 +239,7 @@ public boolean canTripCircuitBreaker() { public void testDispatchRequestAddsAndFreesBytesOnSuccess() { int contentLength = BREAKER_LIMIT.bytesAsInt(); - String content = randomAlphaOfLength(contentLength); + String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); @@ -251,7 +251,7 @@ public void testDispatchRequestAddsAndFreesBytesOnSuccess() { public void testDispatchRequestAddsAndFreesBytesOnError() { int contentLength = BREAKER_LIMIT.bytesAsInt(); - String content = randomAlphaOfLength(contentLength); + String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/error", content, XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); @@ -263,7 +263,7 @@
public void testDispatchRequestAddsAndFreesBytesOnError() { public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { int contentLength = BREAKER_LIMIT.bytesAsInt(); - String content = randomAlphaOfLength(contentLength); + String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); // we will produce an error in the rest handler and one more when sending the error response RestRequest request = testRestRequest("/error", content, XContentType.JSON); ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true); @@ -276,7 +276,7 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { public void testDispatchRequestLimitsBytes() { int contentLength = BREAKER_LIMIT.bytesAsInt() + 1; - String content = randomAlphaOfLength(contentLength); + String content = randomAlphaOfLength((int) Math.round(contentLength / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.SERVICE_UNAVAILABLE); @@ -287,7 +287,7 @@ public void testDispatchRequestLimitsBytes() { } public void testDispatchRequiresContentTypeForRequestsWithContent() { - String content = randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); + String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, null); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.NOT_ACCEPTABLE); restController = new RestController( @@ -312,7 +312,7 @@ public void testDispatchDoesNotRequireContentTypeForRequestsWithoutContent() { } public void testDispatchFailsWithPlainText() { - String content = randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); + String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) .withContent(new BytesArray(content), null).withPath("/foo") .withHeaders(Collections.singletonMap("Content-Type", Collections.singletonList("text/plain"))).build(); @@ -342,7 +342,7 @@ public void testDispatchUnsupportedContentType() { public void testDispatchWorksWithNewlineDelimitedJson() { final String mimeType = "application/x-ndjson"; - String content = randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); + String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) .withContent(new BytesArray(content), null).withPath("/foo") .withHeaders(Collections.singletonMap("Content-Type", Collections.singletonList(mimeType))).build(); @@ -366,7 +366,7 @@ public boolean supportsContentStream() { public void testDispatchWithContentStream() { final String mimeType = randomFrom("application/json", "application/smile"); - String content = randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); + String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); final List contentTypeHeader = Collections.singletonList(mimeType); FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) .withContent(new BytesArray(content), RestRequest.parseContentType(contentTypeHeader)).withPath("/foo") From 
49b977ba7ca482d518f3001221a117baa3ffe2ae Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 3 Jul 2018 11:31:48 +0300 Subject: [PATCH 14/36] resolveHasher defaults to NOOP (#31723) * Default resolveFromHash to Hasher.NOOP This changes the default behavior when resolving the hashing algorithm from unrecognised hash strings, which was introduced in #31234. A hash string that doesn't start with an algorithm identifier can either be a malformed/corrupted hash or a plaintext password when Hasher.NOOP is used (despite warnings). Do not make assumptions about which of the two is true for such strings and default to Hasher.NOOP. Hash verification will subsequently fail for malformed hashes. Finally, do not log the potentially malformed hash, as this can very well be a plaintext password. Resolves #31697 Reverts 58cf95a06f1defd31b16c831708ca32a5b445f98 --- .../xpack/core/security/authc/support/Hasher.java | 15 ++++++--------- .../xpack/security/authc/file/FileRealmTests.java | 2 -- .../xpack/security/authc/support/HasherTests.java | 5 +---- 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index f5275de5fc887..d12547bd90645 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -438,7 +438,8 @@ public static Hasher resolve(String name) { /** * Returns a {@link Hasher} instance that can be used to verify the {@code hash} by inspecting the - * hash prefix and determining the algorithm used for its generation. + * hash prefix and determining the algorithm used for its generation. If no specific algorithm + * prefix can be determined, {@code Hasher.NOOP} is returned. * * @param hash the char array from which the hashing algorithm is to be deduced * @return the hasher that can be used for validation @@ -457,7 +458,8 @@ public static Hasher resolveFromHash(char[] hash) { } else if (CharArrays.charsBeginsWith(SSHA256_PREFIX, hash)) { return Hasher.SSHA256; } else { - throw new IllegalArgumentException("unknown hash format for hash [" + new String(hash) + "]"); + // This is either a non-hashed password from cache or a corrupted hash string.
+ return Hasher.NOOP; } } @@ -471,13 +473,8 @@ public static Hasher resolveFromHash(char[] hash) { * @return true if the hash corresponds to the data, false otherwise */ public static boolean verifyHash(SecureString data, char[] hash) { - try { - final Hasher hasher = resolveFromHash(hash); - return hasher.verify(data, hash); - } catch (IllegalArgumentException e) { - // The password hash format is invalid, we're unable to verify password - return false; - } + final Hasher hasher = resolveFromHash(hash); + return hasher.verify(data, hash); } private static char[] getPbkdf2Hash(SecureString data, int cost) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index b06697bc4eb4f..f5dad8b7c684c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -84,13 +84,11 @@ public void testAuthenticate() throws Exception { assertThat(user.roles(), arrayContaining("role1", "role2")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31697") public void testAuthenticateCaching() throws Exception { Settings settings = Settings.builder() .put("cache.hash_algo", Hasher.values()[randomIntBetween(0, Hasher.values().length - 1)].name().toLowerCase(Locale.ROOT)).build(); RealmConfig config = new RealmConfig("file-test", settings, globalSettings, TestEnvironment.newEnvironment(globalSettings), threadContext); - when(userPasswdStore.verifyPassword(eq("user1"), eq(new SecureString("test123")), any(Supplier.class))) .thenAnswer(VERIFY_PASSWORD_ANSWER); when(userRolesStore.roles("user1")).thenReturn(new String[]{"role1", "role2"}); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java index c303c0ab4683a..6086dc642d22f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java @@ -128,10 +128,7 @@ public void testResolveFromHash() { assertThat(Hasher.resolveFromHash( "{PBKDF2}1000000$UuyhtjDEzWmE2wyY80akZKPWWpy2r2X50so41YML82U=$WFasYLelqbjQwt3EqFlUcwHiC38EZC45Iu/Iz0xL1GQ=".toCharArray()), sameInstance(Hasher.PBKDF2_1000000)); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - Hasher.resolveFromHash("{GBGN}cGR8S2vr3FuFuOpQitR".toCharArray()); - }); - assertThat(e.getMessage(), containsString("unknown hash format for hash")); + assertThat(Hasher.resolveFromHash("notavalidhashformat".toCharArray()), sameInstance(Hasher.NOOP)); } private static void testHasherSelfGenerated(Hasher hasher) { From ce78925732f17a3628a356133cbd75b71fc79c57 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Tue, 3 Jul 2018 13:55:33 +0300 Subject: [PATCH 15/36] JDBC: Fix stackoverflow on getObject and timestamp conversion (#31735) Fix a StackOverflowError in the JdbcResultSet getObject method. Fix a Timestamp conversion bug when getting the value of a time column.
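As background for the first fix: the typed getObject overload delegated to itself rather than to the private convert helper, so any call recursed until the stack was exhausted; the second fix wraps the raw epoch-millisecond value in java.sql.Timestamp instead of returning a bare long. Below is a minimal, self-contained Java sketch of both fixes using a hypothetical stand-in class; only the delegation and Timestamp-wrapping lines mirror the actual patch.

import java.sql.Timestamp;

// Hypothetical stand-in for the result-set class; illustrates the bug pattern only.
class GetObjectSketch {

    // Buggy shape: the typed overload re-invoked itself, so every call
    // recursed until a StackOverflowError.
    // <T> T getObject(int columnIndex, Class<T> type) {
    //     return getObject(columnIndex, type); // infinite self-call
    // }

    // Fixed shape: delegate to the private conversion helper instead.
    <T> T getObject(int columnIndex, Class<T> type) {
        return convert(columnIndex, type);
    }

    @SuppressWarnings("unchecked")
    private <T> T convert(int columnIndex, Class<T> type) {
        // Placeholder column value; a real driver would read it from the current row.
        long epochMillis = 599_616_000_000L;
        if (type == Timestamp.class) {
            // Second fix, sketched: wrap epoch millis in java.sql.Timestamp
            // rather than handing back the raw long.
            return (T) new Timestamp(epochMillis);
        }
        throw new UnsupportedOperationException("sketch only handles TIMESTAMP");
    }

    public static void main(String[] args) {
        Timestamp ts = new GetObjectSketch().getObject(1, Timestamp.class);
        System.out.println(ts.getTime()); // prints 599616000000
    }
}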
--- .../xpack/sql/jdbc/jdbc/JdbcResultSet.java | 2 +- .../xpack/sql/jdbc/jdbc/TypeConverter.java | 2 +- .../sql/jdbc/jdbc/TypeConverterTests.java | 5 +- .../qa/sql/jdbc/JdbcIntegrationTestCase.java | 6 +- .../xpack/qa/sql/jdbc/ResultSetTestCase.java | 82 +++++++++++++++++++ 5 files changed, 92 insertions(+), 5 deletions(-) create mode 100644 x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java index 351ac73a88f28..201ae251ca0df 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcResultSet.java @@ -344,7 +344,7 @@ public T getObject(int columnIndex, Class type) throws SQLException { throw new SQLException("type is null"); } - return getObject(columnIndex, type); + return convert(columnIndex, type); } private T convert(int columnIndex, Class type) throws SQLException { diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index 1e24a03c8b31c..782a17257d424 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -254,7 +254,7 @@ static Object convert(Object v, JDBCType columnType) throws SQLException { case REAL: return floatValue(v); // Float might be represented as string for infinity and NaN values case TIMESTAMP: - return ((Number) v).longValue(); + return new Timestamp(((Number) v).longValue()); default: throw new SQLException("Unexpected column type [" + columnType.getName() + "]"); diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java index 0182ea63f637d..51c130a39118e 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java @@ -14,6 +14,7 @@ import org.joda.time.ReadableDateTime; import java.sql.JDBCType; +import java.sql.Timestamp; import static org.hamcrest.Matchers.instanceOf; @@ -41,8 +42,8 @@ public void testDoubleAsNative() throws Exception { public void testTimestampAsNative() throws Exception { DateTime now = DateTime.now(); - assertThat(convertAsNative(now, JDBCType.TIMESTAMP), instanceOf(Long.class)); - assertEquals(now.getMillis(), convertAsNative(now, JDBCType.TIMESTAMP)); + assertThat(convertAsNative(now, JDBCType.TIMESTAMP), instanceOf(Timestamp.class)); + assertEquals(now.getMillis(), ((Timestamp) convertAsNative(now, JDBCType.TIMESTAMP)).getTime()); } private Object convertAsNative(Object value, JDBCType type) throws Exception { diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java index a2b524c20b070..a339222445a1a 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java +++ 
b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java @@ -82,7 +82,11 @@ protected Connection useDataSource() throws SQLException { } public static void index(String index, CheckedConsumer body) throws IOException { - Request request = new Request("PUT", "/" + index + "/doc/1"); + index(index, "1", body); + } + + public static void index(String index, String documentId, CheckedConsumer body) throws IOException { + Request request = new Request("PUT", "/" + index + "/doc/" + documentId); request.addParameter("refresh", "true"); XContentBuilder builder = JsonXContent.contentBuilder().startObject(); body.accept(builder); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java new file mode 100644 index 0000000000000..861a6dccaba57 --- /dev/null +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ResultSetTestCase.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.qa.sql.jdbc; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Date; + +public class ResultSetTestCase extends JdbcIntegrationTestCase { + public void testGettingTimestamp() throws Exception { + long randomMillis = randomLongBetween(0, System.currentTimeMillis()); + + index("library", "1", builder -> { + builder.field("name", "Don Quixote"); + builder.field("page_count", 1072); + builder.timeField("release_date", new Date(randomMillis)); + builder.timeField("republish_date", null); + }); + index("library", "2", builder -> { + builder.field("name", "1984"); + builder.field("page_count", 328); + builder.timeField("release_date", new Date(-649036800000L)); + builder.timeField("republish_date", new Date(599616000000L)); + }); + + try (Connection connection = esJdbc()) { + try (PreparedStatement statement = connection.prepareStatement("SELECT name, release_date, republish_date FROM library")) { + try (ResultSet results = statement.executeQuery()) { + ResultSetMetaData resultSetMetaData = results.getMetaData(); + + results.next(); + assertEquals(3, resultSetMetaData.getColumnCount()); + assertEquals(randomMillis, results.getTimestamp("release_date").getTime()); + assertEquals(randomMillis, results.getTimestamp(2).getTime()); + assertTrue(results.getObject(2) instanceof Timestamp); + assertEquals(randomMillis, ((Timestamp) results.getObject("release_date")).getTime()); + + assertNull(results.getTimestamp(3)); + assertNull(results.getObject("republish_date")); + + assertTrue(results.next()); + assertEquals(599616000000L, results.getTimestamp("republish_date").getTime()); + assertEquals(-649036800000L, ((Timestamp) results.getObject(2)).getTime()); + + assertFalse(results.next()); + } + } + } + } + + /* + * Checks StackOverflowError fix for https://github.com/elastic/elasticsearch/pull/31735 + */ + public void testNoInfiniteRecursiveGetObjectCalls() throws SQLException, IOException { + index("library", "1", builder -> { + builder.field("name", "Don Quixote"); + builder.field("page_count", 1072); + }); + Connection conn = 
esJdbc(); + PreparedStatement statement = conn.prepareStatement("SELECT * FROM library"); + ResultSet results = statement.executeQuery(); + + try { + results.next(); + results.getObject("name"); + results.getObject("page_count"); + results.getObject(1); + results.getObject(1, String.class); + results.getObject("page_count", Integer.class); + } catch (StackOverflowError soe) { + fail("Infinite recursive call on getObject() method"); + } + } +} From a5fd4a77098ff533c636f6be779ccd90965967f5 Mon Sep 17 00:00:00 2001 From: Sohaib Iftikhar Date: Tue, 3 Jul 2018 14:08:50 +0200 Subject: [PATCH 16/36] Implemented XContent serialisation for GetIndexResponse (#31675) This PR does the server-side work for adding the Get Index API to the REST high-level client, namely moving the resolution of default settings to the transport action. A follow-up would be the client-side changes. --- build.gradle | 4 +- .../client/GetAliasesResponseTests.java | 2 +- .../elasticsearch/action/ActionModule.java | 2 +- .../admin/indices/get/GetIndexRequest.java | 13 +- .../admin/indices/get/GetIndexResponse.java | 273 +++++++++++++++++- .../indices/get/TransportGetIndexAction.java | 23 +- .../admin/indices/RestGetIndicesAction.java | 113 +------- .../indices/get/GetIndexActionTests.java | 144 +++++++++ .../indices/get/GetIndexResponseTests.java | 194 +++++++++++++ .../mapping/get/GetMappingsResponseTests.java | 10 +- 10 files changed, 652 insertions(+), 126 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java diff --git a/build.gradle b/build.gradle index a29c783422172..67128426eebd0 100644 --- a/build.gradle +++ b/build.gradle @@ -170,8 +170,8 @@ task verifyVersions { * the enabled state of every bwc task. It should be set back to true * after the backport of the backcompat code is complete.
*/ -final boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when commiting bwc changes */ +final boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/31675" /* place a PR link here when commiting bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java index 5f3354ad2b95d..c5bc74e7517c3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/GetAliasesResponseTests.java @@ -59,7 +59,7 @@ private static Map> createIndicesAliasesMap(int min, return map; } - private static AliasMetaData createAliasMetaData() { + public static AliasMetaData createAliasMetaData() { AliasMetaData.Builder builder = AliasMetaData.builder(randomAlphaOfLengthBetween(3, 10)); if (randomBoolean()) { builder.routing(randomAlphaOfLengthBetween(3, 10)); diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 48e1cef08d00a..58efce77c9fd8 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -557,7 +557,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRestoreSnapshotAction(settings, restController)); registerHandler.accept(new RestDeleteSnapshotAction(settings, restController)); registerHandler.accept(new RestSnapshotsStatusAction(settings, restController)); - registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); + registerHandler.accept(new RestGetIndicesAction(settings, restController)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); registerHandler.accept(new RestIndicesShardStoresAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 02a7a8ad79fbe..fb8bd6b5684be 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.get; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.info.ClusterInfoRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -80,6 +81,9 @@ public GetIndexRequest(StreamInput in) throws IOException { features[i] = Feature.fromId(in.readByte()); } humanReadable = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + includeDefaults = in.readBoolean(); + } } public GetIndexRequest features(Feature... features) { @@ -119,8 +123,7 @@ public boolean humanReadable() { /** * Sets the value of "include_defaults". - * Used only by the high-level REST client. 
- * + * * @param includeDefaults value of "include_defaults" to be set. * @return this request */ @@ -131,8 +134,7 @@ public GetIndexRequest includeDefaults(boolean includeDefaults) { /** * Whether to return all default settings for each of the indices. - * Used only by the high-level REST client. - * + * * @return true if defaults settings for each of the indices need to returned; * false otherwise. */ @@ -153,6 +155,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(feature.id); } out.writeBoolean(humanReadable); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeBoolean(includeDefaults); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 36bfa81a33416..e2b72077b7f21 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -20,33 +20,50 @@ package org.elasticsearch.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** - * A response for a delete index action. + * A response for a get index action. 
*/ -public class GetIndexResponse extends ActionResponse { +public class GetIndexResponse extends ActionResponse implements ToXContentObject { private ImmutableOpenMap> mappings = ImmutableOpenMap.of(); private ImmutableOpenMap> aliases = ImmutableOpenMap.of(); private ImmutableOpenMap settings = ImmutableOpenMap.of(); + private ImmutableOpenMap defaultSettings = ImmutableOpenMap.of(); private String[] indices; GetIndexResponse(String[] indices, - ImmutableOpenMap> mappings, - ImmutableOpenMap> aliases, ImmutableOpenMap settings) { + ImmutableOpenMap> mappings, + ImmutableOpenMap> aliases, + ImmutableOpenMap settings, + ImmutableOpenMap defaultSettings) { this.indices = indices; + // to have deterministic order + Arrays.sort(indices); if (mappings != null) { this.mappings = mappings; } @@ -56,6 +73,9 @@ public class GetIndexResponse extends ActionResponse { if (settings != null) { this.settings = settings; } + if (defaultSettings != null) { + this.defaultSettings = defaultSettings; + } } GetIndexResponse() { @@ -89,14 +109,51 @@ public ImmutableOpenMap settings() { return settings; } + /** + * If the originating {@link GetIndexRequest} object was configured to include + * defaults, this will contain a mapping of index name to {@link Settings} objects. + * The returned {@link Settings} objects will contain only those settings taking + * effect as defaults. Any settings explicitly set on the index will be available + * via {@link #settings()}. + * See also {@link GetIndexRequest#includeDefaults(boolean)} + */ + public ImmutableOpenMap defaultSettings() { + return defaultSettings; + } + public ImmutableOpenMap getSettings() { return settings(); } + /** + * Returns the string value for the specified index and setting. If the includeDefaults flag was not set or set to + * false on the {@link GetIndexRequest}, this method will only return a value where the setting was explicitly set + * on the index. If the includeDefaults flag was set to true on the {@link GetIndexRequest}, this method will fall + * back to return the default value if the setting was not explicitly set. 
+ */ + public String getSetting(String index, String setting) { + Settings indexSettings = settings.get(index); + if (setting != null) { + if (indexSettings != null && indexSettings.hasValue(setting)) { + return indexSettings.get(setting); + } else { + Settings defaultIndexSettings = defaultSettings.get(index); + if (defaultIndexSettings != null) { + return defaultIndexSettings.get(setting); + } else { + return null; + } + } + } else { + return null; + } + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); this.indices = in.readStringArray(); + int mappingsSize = in.readVInt(); ImmutableOpenMap.Builder> mappingsMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < mappingsSize; i++) { @@ -109,6 +166,7 @@ public void readFrom(StreamInput in) throws IOException { mappingsMapBuilder.put(key, mappingEntryBuilder.build()); } mappings = mappingsMapBuilder.build(); + int aliasesSize = in.readVInt(); ImmutableOpenMap.Builder> aliasesMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < aliasesSize; i++) { @@ -121,6 +179,7 @@ public void readFrom(StreamInput in) throws IOException { aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder)); } aliases = aliasesMapBuilder.build(); + int settingsSize = in.readVInt(); ImmutableOpenMap.Builder settingsMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < settingsSize; i++) { @@ -128,6 +187,15 @@ public void readFrom(StreamInput in) throws IOException { settingsMapBuilder.put(key, Settings.readSettingsFromStream(in)); } settings = settingsMapBuilder.build(); + + ImmutableOpenMap.Builder defaultSettingsMapBuilder = ImmutableOpenMap.builder(); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + int defaultSettingsSize = in.readVInt(); + for (int i = 0; i < defaultSettingsSize ; i++) { + defaultSettingsMapBuilder.put(in.readString(), Settings.readSettingsFromStream(in)); + } + } + defaultSettings = defaultSettingsMapBuilder.build(); } @Override @@ -156,5 +224,202 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(indexEntry.key); Settings.writeSettingsToStream(indexEntry.value, out); } + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeVInt(defaultSettings.size()); + for (ObjectObjectCursor indexEntry : defaultSettings) { + out.writeString(indexEntry.key); + Settings.writeSettingsToStream(indexEntry.value, out); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + for (final String index : indices) { + builder.startObject(index); + { + builder.startObject("aliases"); + List indexAliases = aliases.get(index); + if (indexAliases != null) { + for (final AliasMetaData alias : indexAliases) { + AliasMetaData.Builder.toXContent(alias, builder, params); + } + } + builder.endObject(); + + builder.startObject("mappings"); + ImmutableOpenMap indexMappings = mappings.get(index); + if (indexMappings != null) { + for (final ObjectObjectCursor typeEntry : indexMappings) { + builder.field(typeEntry.key); + builder.map(typeEntry.value.sourceAsMap()); + } + } + builder.endObject(); + + builder.startObject("settings"); + Settings indexSettings = settings.get(index); + if (indexSettings != null) { + indexSettings.toXContent(builder, params); + } + builder.endObject(); + + Settings defaultIndexSettings = defaultSettings.get(index); + if (defaultIndexSettings != null && defaultIndexSettings.isEmpty() == false) { + builder.startObject("defaults"); + 
defaultIndexSettings.toXContent(builder, params); + builder.endObject(); + } + } + builder.endObject(); + } + } + builder.endObject(); + return builder; + } + + private static List parseAliases(XContentParser parser) throws IOException { + List indexAliases = new ArrayList<>(); + // We start at START_OBJECT since parseIndexEntry ensures that + while (parser.nextToken() != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); + indexAliases.add(AliasMetaData.Builder.fromXContent(parser)); + } + return indexAliases; + } + + private static ImmutableOpenMap parseMappings(XContentParser parser) throws IOException { + ImmutableOpenMap.Builder indexMappings = ImmutableOpenMap.builder(); + // We start at START_OBJECT since parseIndexEntry ensures that + while (parser.nextToken() != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); + parser.nextToken(); + if (parser.currentToken() == Token.START_OBJECT) { + String mappingType = parser.currentName(); + indexMappings.put(mappingType, new MappingMetaData(mappingType, parser.map())); + } else if (parser.currentToken() == Token.START_ARRAY) { + parser.skipChildren(); + } + } + return indexMappings.build(); + } + + private static IndexEntry parseIndexEntry(XContentParser parser) throws IOException { + List indexAliases = null; + ImmutableOpenMap indexMappings = null; + Settings indexSettings = null; + Settings indexDefaultSettings = null; + // We start at START_OBJECT since fromXContent ensures that + while (parser.nextToken() != Token.END_OBJECT) { + ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); + parser.nextToken(); + if (parser.currentToken() == Token.START_OBJECT) { + switch (parser.currentName()) { + case "aliases": + indexAliases = parseAliases(parser); + break; + case "mappings": + indexMappings = parseMappings(parser); + break; + case "settings": + indexSettings = Settings.fromXContent(parser); + break; + case "defaults": + indexDefaultSettings = Settings.fromXContent(parser); + break; + default: + parser.skipChildren(); + } + } else if (parser.currentToken() == Token.START_ARRAY) { + parser.skipChildren(); + } + } + return new IndexEntry(indexAliases, indexMappings, indexSettings, indexDefaultSettings); + } + + // This is just an internal container to make stuff easier for returning + private static class IndexEntry { + List indexAliases = new ArrayList<>(); + ImmutableOpenMap indexMappings = ImmutableOpenMap.of(); + Settings indexSettings = Settings.EMPTY; + Settings indexDefaultSettings = Settings.EMPTY; + IndexEntry(List indexAliases, ImmutableOpenMap indexMappings, + Settings indexSettings, Settings indexDefaultSettings) { + if (indexAliases != null) this.indexAliases = indexAliases; + if (indexMappings != null) this.indexMappings = indexMappings; + if (indexSettings != null) this.indexSettings = indexSettings; + if (indexDefaultSettings != null) this.indexDefaultSettings = indexDefaultSettings; + } + } + + public static GetIndexResponse fromXContent(XContentParser parser) throws IOException { + ImmutableOpenMap.Builder> aliases = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> mappings = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder settings = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder defaultSettings = ImmutableOpenMap.builder(); + List indices = new ArrayList<>(); + + if (parser.currentToken() == null) { + parser.nextToken(); + } + 
ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + parser.nextToken(); + + while (!parser.isClosed()) { + if (parser.currentToken() == Token.START_OBJECT) { + // we assume this is an index entry + String indexName = parser.currentName(); + indices.add(indexName); + IndexEntry indexEntry = parseIndexEntry(parser); + // make the order deterministic + CollectionUtil.timSort(indexEntry.indexAliases, Comparator.comparing(AliasMetaData::alias)); + aliases.put(indexName, Collections.unmodifiableList(indexEntry.indexAliases)); + mappings.put(indexName, indexEntry.indexMappings); + settings.put(indexName, indexEntry.indexSettings); + if (indexEntry.indexDefaultSettings.isEmpty() == false) { + defaultSettings.put(indexName, indexEntry.indexDefaultSettings); + } + } else if (parser.currentToken() == Token.START_ARRAY) { + parser.skipChildren(); + } else { + parser.nextToken(); + } + } + return + new GetIndexResponse( + indices.toArray(new String[0]), mappings.build(), aliases.build(), + settings.build(), defaultSettings.build() + ); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetIndexResponse that = (GetIndexResponse) o; + return Arrays.equals(indices, that.indices) && + Objects.equals(aliases, that.aliases) && + Objects.equals(mappings, that.mappings) && + Objects.equals(settings, that.settings) && + Objects.equals(defaultSettings, that.defaultSettings); + } + + @Override + public int hashCode() { + return + Objects.hash( + Arrays.hashCode(indices), + aliases, + mappings, + settings, + defaultSettings + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index b383c02be74a7..060c345454abb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -36,9 +36,11 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.common.settings.IndexScopedSettings; import java.io.IOException; import java.util.List; @@ -49,14 +51,19 @@ public class TransportGetIndexAction extends TransportClusterInfoAction { private final IndicesService indicesService; + private final IndexScopedSettings indexScopedSettings; + private final SettingsFilter settingsFilter; @Inject public TransportGetIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService) { + ThreadPool threadPool, SettingsFilter settingsFilter, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, + IndexScopedSettings indexScopedSettings) { super(settings, GetIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexRequest::new, 
indexNameExpressionResolver); this.indicesService = indicesService; + this.settingsFilter = settingsFilter; + this.indexScopedSettings = indexScopedSettings; } @Override @@ -82,6 +89,7 @@ protected void doMasterOperation(final GetIndexRequest request, String[] concret ImmutableOpenMap> mappingsResult = ImmutableOpenMap.of(); ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); ImmutableOpenMap settings = ImmutableOpenMap.of(); + ImmutableOpenMap defaultSettings = ImmutableOpenMap.of(); Feature[] features = request.features(); boolean doneAliases = false; boolean doneMappings = false; @@ -109,14 +117,21 @@ protected void doMasterOperation(final GetIndexRequest request, String[] concret case SETTINGS: if (!doneSettings) { ImmutableOpenMap.Builder settingsMapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder defaultSettingsMapBuilder = ImmutableOpenMap.builder(); for (String index : concreteIndices) { Settings indexSettings = state.metaData().index(index).getSettings(); if (request.humanReadable()) { indexSettings = IndexMetaData.addHumanReadableSettings(indexSettings); } settingsMapBuilder.put(index, indexSettings); + if (request.includeDefaults()) { + Settings defaultIndexSettings = + settingsFilter.filter(indexScopedSettings.diff(indexSettings, Settings.EMPTY)); + defaultSettingsMapBuilder.put(index, defaultIndexSettings); + } } settings = settingsMapBuilder.build(); + defaultSettings = defaultSettingsMapBuilder.build(); doneSettings = true; } break; @@ -125,6 +140,8 @@ protected void doMasterOperation(final GetIndexRequest request, String[] concret throw new IllegalStateException("feature [" + feature + "] is not valid"); } } - listener.onResponse(new GetIndexResponse(concreteIndices, mappingsResult, aliasesResult, settings)); + listener.onResponse( + new GetIndexResponse(concreteIndices, mappingsResult, aliasesResult, settings, defaultSettings) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java index e9552d4752685..04fae0f30f6bf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -19,55 +19,35 @@ package org.elasticsearch.rest.action.admin.indices; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import 
org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import java.util.List; import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; /** * The REST handler for get index and head index APIs. */ public class RestGetIndicesAction extends BaseRestHandler { - private final IndexScopedSettings indexScopedSettings; - private final SettingsFilter settingsFilter; public RestGetIndicesAction( final Settings settings, - final RestController controller, - final IndexScopedSettings indexScopedSettings, - final SettingsFilter settingsFilter) { + final RestController controller) { super(settings); - this.indexScopedSettings = indexScopedSettings; controller.registerHandler(GET, "/{index}", this); controller.registerHandler(HEAD, "/{index}", this); - this.settingsFilter = settingsFilter; } @Override @@ -82,93 +62,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getIndexRequest.indices(indices); getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); + getIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexRequest.masterNodeTimeout())); getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); - final boolean defaults = request.paramAsBoolean("include_defaults", false); - return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception { - builder.startObject(); - { - for (final String index : response.indices()) { - builder.startObject(index); - { - for (final Feature feature : getIndexRequest.features()) { - switch (feature) { - case ALIASES: - writeAliases(response.aliases().get(index), builder, request); - break; - case MAPPINGS: - writeMappings(response.mappings().get(index), builder); - break; - case SETTINGS: - writeSettings(response.settings().get(index), builder, request, defaults); - break; - default: - throw new IllegalStateException("feature [" + feature + "] is not valid"); - } - } - } - builder.endObject(); - - } - } - builder.endObject(); - - return new BytesRestResponse(OK, builder); - } - - private void writeAliases( - final List aliases, - final XContentBuilder builder, - final Params params) throws IOException { - builder.startObject("aliases"); - { - if (aliases != null) { - for (final AliasMetaData alias : aliases) { - AliasMetaData.Builder.toXContent(alias, builder, params); - } - } - } - builder.endObject(); - } - - private void writeMappings(final ImmutableOpenMap mappings, final XContentBuilder builder) - throws IOException { - builder.startObject("mappings"); - { - if (mappings != null) { - for (final ObjectObjectCursor typeEntry : mappings) { - builder.field(typeEntry.key); - builder.map(typeEntry.value.sourceAsMap()); - } - } - } - builder.endObject(); - } - - private void writeSettings( - final Settings settings, - final XContentBuilder builder, - final Params params, - final boolean defaults) throws IOException { - builder.startObject("settings"); - { - settings.toXContent(builder, params); - } - builder.endObject(); - if (defaults) { - builder.startObject("defaults"); - { - 
settingsFilter - .filter(indexScopedSettings.diff(settings, RestGetIndicesAction.this.settings)) - .toXContent(builder, request); - } - builder.endObject(); - } - } - - }); + getIndexRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); + return channel -> client.admin().indices().getIndex(getIndexRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java new file mode 100644 index 0000000000000..02a98eacda0e9 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.get; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.index.Index; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +public class GetIndexActionTests extends ESSingleNodeTestCase { + + private TransportService transportService; + private ClusterService clusterService; + private IndicesService indicesService; + private ThreadPool threadPool; + private SettingsFilter settingsFilter; + private final String indexName = "test_index"; + + private TestTransportGetIndexAction getIndexAction; + + @Before + public void setUp() throws Exception { + super.setUp(); + + settingsFilter = new SettingsModule(Settings.EMPTY, Collections.emptyList(), Collections.emptyList()).getSettingsFilter(); + threadPool = new TestThreadPool("GetIndexActionTests"); + clusterService = getInstanceFromNode(ClusterService.class); + indicesService = getInstanceFromNode(IndicesService.class); + CapturingTransport capturingTransport = new 
CapturingTransport(); + transportService = new TransportService(clusterService.getSettings(), capturingTransport, threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> clusterService.localNode(), null, Collections.emptySet()); + transportService.start(); + transportService.acceptIncomingRequests(); + getIndexAction = new GetIndexActionTests.TestTransportGetIndexAction(); + } + + @After + public void tearDown() throws Exception { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + super.tearDown(); + } + + public void testIncludeDefaults() { + GetIndexRequest defaultsRequest = new GetIndexRequest().indices(indexName).includeDefaults(true); + getIndexAction.execute(null, defaultsRequest, ActionListener.wrap( + defaultsResponse -> { + assertNotNull( + "index.refresh_interval should be set as we are including defaults", + defaultsResponse.getSetting(indexName, "index.refresh_interval") + ); + }, exception -> { + throw new AssertionError(exception); + }) + ); + } + + public void testDoNotIncludeDefaults() { + GetIndexRequest noDefaultsRequest = new GetIndexRequest().indices(indexName); + getIndexAction.execute(null, noDefaultsRequest, ActionListener.wrap( + noDefaultsResponse -> { + assertNull( + "index.refresh_interval should be null as it was never set", + noDefaultsResponse.getSetting(indexName, "index.refresh_interval") + ); + }, exception -> { + throw new AssertionError(exception); + }) + ); + } + + class TestTransportGetIndexAction extends TransportGetIndexAction { + + TestTransportGetIndexAction() { + super(Settings.EMPTY, GetIndexActionTests.this.transportService, GetIndexActionTests.this.clusterService, + GetIndexActionTests.this.threadPool, settingsFilter, new ActionFilters(Collections.emptySet()), + new GetIndexActionTests.Resolver(Settings.EMPTY), indicesService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS); + } + + @Override + protected void doMasterOperation(GetIndexRequest request, String[] concreteIndices, ClusterState state, + ActionListener listener) { + ClusterState stateWithIndex = ClusterStateCreationUtils.state(indexName, 1, 1); + super.doMasterOperation(request, concreteIndices, stateWithIndex, listener); + } + } + + static class Resolver extends IndexNameExpressionResolver { + Resolver(Settings settings) { + super(settings); + } + + @Override + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { + return request.indices(); + } + + @Override + public Index[] concreteIndices(ClusterState state, IndicesRequest request) { + Index[] out = new Index[request.indices().length]; + for (int x = 0; x < out.length; x++) { + out[x] = new Index(request.indices()[x], "_na_"); + } + return out; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java new file mode 100644 index 0000000000000..3991442fd5b87 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexResponseTests.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.get;
+
+import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponseTests;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponseTests;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.IndexScopedSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.RandomCreateIndexGenerator;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.function.Predicate;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING;
+
+public class GetIndexResponseTests extends AbstractStreamableXContentTestCase<GetIndexResponse> {
+
+    /**
+     * The following byte response was generated from the v6.3.0 tag
+     */
+    private static final String TEST_6_3_0_RESPONSE_BYTES =
+        "AQhteV9pbmRleAEIbXlfaW5kZXgBA2RvYwNkb2OePID6KURGTACqVkrLTM1JiTdUsqpWKqksSFWyUiouKcrMS1eqrQUAAAD//" +
+        "wMAAAABCG15X2luZGV4AgZhbGlhczEAAQJyMQECcjEGYWxpYXMyAX8jNXYiREZMAKpWKkktylWyqlaqTE0sUrIyMjA0q60FAAAA//" +
+        "8DAAAAAQhteV9pbmRleAIYaW5kZXgubnVtYmVyX29mX3JlcGxpY2FzAAExFmluZGV4Lm51bWJlcl9vZl9zaGFyZHMAATI=";
+    private static final GetIndexResponse TEST_6_3_0_RESPONSE_INSTANCE = getExpectedTest630Response();
+
+    @Override
+    protected GetIndexResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetIndexResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected GetIndexResponse createBlankInstance() {
+        return new GetIndexResponse();
+    }
+
+    @Override
+    protected GetIndexResponse createTestInstance() {
+        String[] indices = generateRandomStringArray(5, 5, false, false);
+        ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder<String, List<AliasMetaData>> aliases = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder<String, Settings> settings = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder<String, Settings> defaultSettings = ImmutableOpenMap.builder();
+        IndexScopedSettings indexScopedSettings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS;
+        boolean includeDefaults = randomBoolean();
+        for (String index: indices) {
+            mappings.put(index, GetMappingsResponseTests.createMappingsForIndex());
+
+            List<AliasMetaData> aliasMetaDataList = new ArrayList<>();
+            int aliasesNum = randomIntBetween(0, 3);
+            for (int i=0; i getRandomFieldsExcludeFilter() {
+        //we do not want to add new fields at the root (index-level), or inside the blocks
+        return
+            f -> f.equals("") || f.contains(".settings") || f.contains(".defaults") || f.contains(".mappings") ||
+                f.contains(".aliases");
+    }
+
+    private static ImmutableOpenMap<String, List<AliasMetaData>> getTestAliases(String indexName) {
+        ImmutableOpenMap.Builder<String, List<AliasMetaData>> aliases = ImmutableOpenMap.builder();
+        List<AliasMetaData> indexAliases = new ArrayList<>();
+        indexAliases.add(new AliasMetaData.Builder("alias1").routing("r1").build());
+        indexAliases.add(new AliasMetaData.Builder("alias2").filter("{\"term\": {\"year\": 2016}}").build());
+        aliases.put(indexName, Collections.unmodifiableList(indexAliases));
+        return aliases.build();
+    }
+
+    private static ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> getTestMappings(String indexName) {
+        ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> mappings = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder<String, MappingMetaData> indexMappings = ImmutableOpenMap.builder();
+        try {
+            indexMappings.put(
+                "doc",
+                new MappingMetaData("doc",
+                    Collections.singletonMap("field_1", Collections.singletonMap("type", "string"))
+                )
+            );
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+        mappings.put(indexName, indexMappings.build());
+        return mappings.build();
+    }
+
+    private static ImmutableOpenMap<String, Settings> getTestSettings(String indexName) {
+        ImmutableOpenMap.Builder<String, Settings> settings = ImmutableOpenMap.builder();
+        Settings.Builder indexSettings = Settings.builder();
+        indexSettings.put(SETTING_NUMBER_OF_SHARDS, 2);
+        indexSettings.put(SETTING_NUMBER_OF_REPLICAS, 1);
+        settings.put(indexName, indexSettings.build());
+        return settings.build();
+    }
+
+    private static GetIndexResponse getExpectedTest630Response() {
+        // The only difference between this snippet and the one used for generating TEST_6_3_0_RESPONSE_BYTES is the
+        // constructor for GetIndexResponse, which now also takes defaultSettings; a sketch of that snippet follows.
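+        // A minimal sketch of the generation snippet, reconstructed from testCanOutput622Response below and
+        // assuming the pre-defaultSettings constructor was in use when the bytes were captured on the v6.3.0 tag:
+        //
+        //     BytesStreamOutput bso = new BytesStreamOutput();
+        //     bso.setVersion(Version.V_6_3_0);     // pin the wire format to 6.3.0
+        //     response.writeTo(bso);               // serialize the response built below
+        //     String bytes = Base64.getEncoder().encodeToString(BytesReference.toBytes(bso.bytes()));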
+        String indexName = "my_index";
+        String[] indices = { indexName };
+        return
+            new GetIndexResponse(
+                indices, getTestMappings(indexName), getTestAliases(indexName), getTestSettings(indexName),
+                ImmutableOpenMap.of()
+            );
+    }
+
+    private static GetIndexResponse getResponseWithDefaultSettings() {
+        String indexName = "my_index";
+        String[] indices = { indexName };
+        ImmutableOpenMap.Builder<String, Settings> defaultSettings = ImmutableOpenMap.builder();
+        Settings.Builder indexDefaultSettings = Settings.builder();
+        indexDefaultSettings.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s");
+        defaultSettings.put(indexName, indexDefaultSettings.build());
+        return
+            new GetIndexResponse(
+                indices, getTestMappings(indexName), getTestAliases(indexName), getTestSettings(indexName),
+                defaultSettings.build()
+            );
+    }
+
+    public void testCanDecode622Response() throws IOException {
+        StreamInput si = StreamInput.wrap(Base64.getDecoder().decode(TEST_6_3_0_RESPONSE_BYTES));
+        si.setVersion(Version.V_6_3_0);
+        GetIndexResponse response = new GetIndexResponse();
+        response.readFrom(si);
+
+        Assert.assertEquals(TEST_6_3_0_RESPONSE_INSTANCE, response);
+    }
+
+    public void testCanOutput622Response() throws IOException {
+        GetIndexResponse responseWithExtraFields = getResponseWithDefaultSettings();
+        BytesStreamOutput bso = new BytesStreamOutput();
+        bso.setVersion(Version.V_6_3_0);
+        responseWithExtraFields.writeTo(bso);
+        String base64OfResponse = Base64.getEncoder().encodeToString(BytesReference.toBytes(bso.bytes()));
+
+        Assert.assertEquals(TEST_6_3_0_RESPONSE_BYTES, base64OfResponse);
+    }
+
+}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java
index 0fa5ca075fa8d..91c7841868393 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java
@@ -80,8 +80,7 @@ protected GetMappingsResponse mutateInstance(GetMappingsResponse instance) throw
         return mutate(instance);
     }
 
-    @Override
-    protected GetMappingsResponse createTestInstance() {
+    public static ImmutableOpenMap<String, MappingMetaData> createMappingsForIndex() {
         // rarely have no types
         int typeCount = rarely() ? 0 : scaledRandomIntBetween(1, 3);
         List<MappingMetaData> typeMappings = new ArrayList<>(typeCount);
@@ -104,8 +103,13 @@ protected GetMappingsResponse createTestInstance() {
         }
         ImmutableOpenMap.Builder<String, MappingMetaData> typeBuilder = ImmutableOpenMap.builder();
         typeMappings.forEach(mmd -> typeBuilder.put(mmd.type(), mmd));
+        return typeBuilder.build();
+    }
+
+    @Override
+    protected GetMappingsResponse createTestInstance() {
         ImmutableOpenMap.Builder<String, ImmutableOpenMap<String, MappingMetaData>> indexBuilder = ImmutableOpenMap.builder();
-        indexBuilder.put("index-" + randomAlphaOfLength(5), typeBuilder.build());
+        indexBuilder.put("index-" + randomAlphaOfLength(5), createMappingsForIndex());
         GetMappingsResponse resp = new GetMappingsResponse(indexBuilder.build());
         logger.debug("--> created: {}", resp);
         return resp;

From 69f8934101f495143f09da0e11a6810561f18d58 Mon Sep 17 00:00:00 2001
From: Alexander Reelsen
Date: Tue, 3 Jul 2018 14:52:52 +0200
Subject: [PATCH 17/36] Watcher: Reenable start/stop yaml tests (#31754)

The underlying cause for this has been fixed, thus the tests can be reenabled.
Closes #30298
---
 .../resources/rest-api-spec/test/watcher/stats/10_basic.yml | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml
index 554e339687ba4..9844dea9135a3 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml
@@ -1,17 +1,11 @@
 ---
 "Test watcher stats output":
-  - skip:
-      version: "all"
-      reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30298"
   - do: {xpack.watcher.stats: {}}
   - match: { "manually_stopped": false }
   - match: { "stats.0.watcher_state": "started" }

 ---
 "Test watcher stats supports emit_stacktraces parameter":
-  - skip:
-      version: "all"
-      reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30298"
   - do:
       xpack.watcher.stats:
         metric: "all"

From 4108722052707c4d9acc1b211e6fd2f77885dcc2 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Tue, 3 Jul 2018 14:12:07 +0100
Subject: [PATCH 18/36] Add support for AWS session tokens (#30414)

AWS supports the creation and use of credentials that are only valid for a
fixed period of time. These credentials comprise three parts: the usual access
key and secret key, together with a session token. This commit adds support
for these three-part credentials to the EC2 discovery plugin and the S3
repository plugin.

Note that session tokens are only valid for a limited period of time, and
there is currently no mechanism for refreshing or rotating them when they
expire other than restarting Elasticsearch. Nonetheless, this feature is
already useful for nodes that only need to run for a few days, such as for
training, testing or evaluation. #29135 tracks the work towards allowing
these credentials to be refreshed at runtime.

Resolves #16428
---
 docs/plugins/discovery-ec2.asciidoc | 8 +-
 docs/plugins/repository-s3.asciidoc | 4 +
 .../discovery/ec2/AwsEc2ServiceImpl.java | 10 +-
 .../discovery/ec2/Ec2ClientSettings.java | 62 +++--
 .../discovery/ec2/Ec2DiscoveryPlugin.java | 1 +
 .../discovery/ec2/AwsEc2ServiceImplTests.java | 55 +++-
 .../ec2/Ec2DiscoveryPluginTests.java | 79 ++++--
 .../repository-s3/qa/amazon-s3/build.gradle | 68 +++--
 .../repositories/s3/AmazonS3Fixture.java | 50 +++-
 ...> 10_repository_permanent_credentials.yml} | 59 ++---
 .../20_repository_temporary_credentials.yml | 243 ++++++++++++++++++
 .../repositories/s3/S3ClientSettings.java | 34 ++-
 .../repositories/s3/S3RepositoryPlugin.java | 1 +
 .../repositories/s3/S3Service.java | 3 +-
 .../s3/S3ClientSettingsTests.java | 123 +++++++++
 .../common/settings/SettingsException.java | 4 +
 16 files changed, 685 insertions(+), 119 deletions(-)
 rename plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/{10_repository.yml => 10_repository_permanent_credentials.yml} (73%)
 create mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml
 create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java

diff --git a/docs/plugins/discovery-ec2.asciidoc b/docs/plugins/discovery-ec2.asciidoc
index 2e2bc9cf268fa..5cdc568e16250 100644
--- a/docs/plugins/discovery-ec2.asciidoc
+++ b/docs/plugins/discovery-ec2.asciidoc
@@ -40,11 +40,15 @@ Those that must be stored in the keystore are marked as `Secure`.
 
 `access_key`::
 
-    An s3 access key.
The `secret_key` setting must also be specified. (Secure) + An ec2 access key. The `secret_key` setting must also be specified. (Secure) `secret_key`:: - An s3 secret key. The `access_key` setting must also be specified. (Secure) + An ec2 secret key. The `access_key` setting must also be specified. (Secure) + +`session_token`:: + An ec2 session token. The `access_key` and `secret_key` settings must also + be specified. (Secure) `endpoint`:: diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 6701d53c24047..0d73e35f18ec3 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -73,6 +73,10 @@ are marked as `Secure`. An s3 secret key. The `access_key` setting must also be specified. (Secure) +`session_token`:: + An s3 session token. The `access_key` and `secret_key` settings must also + be specified. (Secure) + `endpoint`:: The s3 service endpoint to connect to. This will be automatically diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index 67902174630ea..a65500d9e2289 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -19,12 +19,9 @@ package org.elasticsearch.discovery.ec2; -import java.util.Random; -import java.util.concurrent.atomic.AtomicReference; - import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.internal.StaticCredentialsProvider; @@ -39,6 +36,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.LazyInitializable; +import java.util.Random; +import java.util.concurrent.atomic.AtomicReference; + class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service { public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/"; @@ -99,7 +99,7 @@ static ClientConfiguration buildConfiguration(Logger logger, Ec2ClientSettings c // pkg private for tests static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings clientSettings) { - final BasicAWSCredentials credentials = clientSettings.credentials; + final AWSCredentials credentials = clientSettings.credentials; if (credentials == null) { logger.debug("Using either environment variables, system properties or instance profile credentials"); return new DefaultAWSCredentialsProviderChain(); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java index b42b0d546001a..d76c9e820b8b1 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java @@ -21,14 +21,20 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; +import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.BasicAWSCredentials; - +import com.amazonaws.auth.BasicSessionCredentials; +import org.apache.logging.log4j.Logger; +import 
org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; + import java.util.Locale; /** @@ -42,6 +48,9 @@ final class Ec2ClientSettings { /** The secret key (ie password) for connecting to ec2. */ static final Setting SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", null); + /** The session token for connecting to ec2. */ + static final Setting SESSION_TOKEN_SETTING = SecureSetting.secureString("discovery.ec2.session_token", null); + /** The host name of a proxy to connect to ec2 through. */ static final Setting PROXY_HOST_SETTING = Setting.simpleString("discovery.ec2.proxy.host", Property.NodeScope); @@ -66,8 +75,12 @@ final class Ec2ClientSettings { static final Setting READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout", TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope); + private static final Logger logger = Loggers.getLogger(Ec2ClientSettings.class); + + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger); + /** Credentials to authenticate with ec2. */ - final BasicAWSCredentials credentials; + final AWSCredentials credentials; /** * The ec2 endpoint the client should talk to, or empty string to use the @@ -96,7 +109,7 @@ final class Ec2ClientSettings { /** The read timeout for the ec2 client. 
*/ final int readTimeoutMillis; - protected Ec2ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, + protected Ec2ClientSettings(AWSCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, String proxyUsername, String proxyPassword, int readTimeoutMillis) { this.credentials = credentials; this.endpoint = endpoint; @@ -108,26 +121,45 @@ protected Ec2ClientSettings(BasicAWSCredentials credentials, String endpoint, Pr this.readTimeoutMillis = readTimeoutMillis; } - static BasicAWSCredentials loadCredentials(Settings settings) { - try (SecureString accessKey = ACCESS_KEY_SETTING.get(settings); - SecureString secretKey = SECRET_KEY_SETTING.get(settings);) { - if (accessKey.length() != 0) { - if (secretKey.length() != 0) { - return new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); + static AWSCredentials loadCredentials(Settings settings) { + try (SecureString key = ACCESS_KEY_SETTING.get(settings); + SecureString secret = SECRET_KEY_SETTING.get(settings); + SecureString sessionToken = SESSION_TOKEN_SETTING.get(settings)) { + if (key.length() == 0 && secret.length() == 0) { + if (sessionToken.length() > 0) { + throw new SettingsException("Setting [{}] is set but [{}] and [{}] are not", + SESSION_TOKEN_SETTING.getKey(), ACCESS_KEY_SETTING.getKey(), SECRET_KEY_SETTING.getKey()); + } + + logger.debug("Using either environment variables, system properties or instance profile credentials"); + return null; + } else { + if (key.length() == 0) { + DEPRECATION_LOGGER.deprecated("Setting [{}] is set but [{}] is not, which will be unsupported in future", + SECRET_KEY_SETTING.getKey(), ACCESS_KEY_SETTING.getKey()); + } + if (secret.length() == 0) { + DEPRECATION_LOGGER.deprecated("Setting [{}] is set but [{}] is not, which will be unsupported in future", + ACCESS_KEY_SETTING.getKey(), SECRET_KEY_SETTING.getKey()); + } + + final AWSCredentials credentials; + if (sessionToken.length() == 0) { + logger.debug("Using basic key/secret credentials"); + credentials = new BasicAWSCredentials(key.toString(), secret.toString()); } else { - throw new IllegalArgumentException("Missing secret key for ec2 client."); + logger.debug("Using basic session credentials"); + credentials = new BasicSessionCredentials(key.toString(), secret.toString(), sessionToken.toString()); } - } else if (secretKey.length() != 0) { - throw new IllegalArgumentException("Missing access key for ec2 client."); + return credentials; } - return null; } } // pkg private for tests /** Parse settings for a single client. 
*/ static Ec2ClientSettings getClientSettings(Settings settings) { - final BasicAWSCredentials credentials = loadCredentials(settings); + final AWSCredentials credentials = loadCredentials(settings); try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings); SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)) { return new Ec2ClientSettings( diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index 9fc32ea306c0e..bb757dc05adba 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -106,6 +106,7 @@ public List> getSettings() { // Register EC2 discovery settings: discovery.ec2 Ec2ClientSettings.ACCESS_KEY_SETTING, Ec2ClientSettings.SECRET_KEY_SETTING, + Ec2ClientSettings.SESSION_TOKEN_SETTING, Ec2ClientSettings.ENDPOINT_SETTING, Ec2ClientSettings.PROTOCOL_SETTING, Ec2ClientSettings.PROXY_HOST_SETTING, diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java index a13fe47a632ae..148e58d7b3c06 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -23,10 +23,11 @@ import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.BasicSessionCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.ec2.AwsEc2ServiceImpl; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.instanceOf; @@ -44,15 +45,53 @@ public void testAWSCredentialsWithElasticsearchAwsSettings() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("discovery.ec2.access_key", "aws_key"); secureSettings.setString("discovery.ec2.secret_key", "aws_secret"); - final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - launchAWSCredentialsWithElasticsearchSettingsTest(settings, "aws_key", "aws_secret"); + final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, + Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("aws_key")); + assertThat(credentials.getAWSSecretKey(), is("aws_secret")); } - protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings settings, String expectedKey, String expectedSecret) { - final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, Ec2ClientSettings.getClientSettings(settings)) - .getCredentials(); - assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); - assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); + public void testAWSSessionCredentialsWithElasticsearchAwsSettings() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + 
secureSettings.setString("discovery.ec2.access_key", "aws_key"); + secureSettings.setString("discovery.ec2.secret_key", "aws_secret"); + secureSettings.setString("discovery.ec2.session_token", "aws_session_token"); + final BasicSessionCredentials credentials = (BasicSessionCredentials) AwsEc2ServiceImpl.buildCredentials(logger, + Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("aws_key")); + assertThat(credentials.getAWSSecretKey(), is("aws_secret")); + assertThat(credentials.getSessionToken(), is("aws_session_token")); + } + + public void testDeprecationOfLoneAccessKey() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("discovery.ec2.access_key", "aws_key"); + final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, + Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("aws_key")); + assertThat(credentials.getAWSSecretKey(), is("")); + assertSettingDeprecationsAndWarnings(new String[]{}, + "Setting [discovery.ec2.access_key] is set but [discovery.ec2.secret_key] is not, which will be unsupported in future"); + } + + public void testDeprecationOfLoneSecretKey() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("discovery.ec2.secret_key", "aws_secret"); + final AWSCredentials credentials = AwsEc2ServiceImpl.buildCredentials(logger, + Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("")); + assertThat(credentials.getAWSSecretKey(), is("aws_secret")); + assertSettingDeprecationsAndWarnings(new String[]{}, + "Setting [discovery.ec2.secret_key] is set but [discovery.ec2.access_key] is not, which will be unsupported in future"); + } + + public void testRejectionOfLoneSessionToken() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("discovery.ec2.session_token", "aws_session_token"); + SettingsException e = expectThrows(SettingsException.class, () -> AwsEc2ServiceImpl.buildCredentials(logger, + Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build()))); + assertThat(e.getMessage(), is( + "Setting [discovery.ec2.session_token] is set but [discovery.ec2.access_key] and [discovery.ec2.secret_key] are not")); } public void testAWSDefaultConfiguration() { diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 6001ab56d5042..720ffaddd74a5 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -19,22 +19,24 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.auth.BasicSessionCredentials; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESTestCase; + import java.io.IOException; import 
java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -import org.elasticsearch.discovery.ec2.AwsEc2Service; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.ec2.Ec2DiscoveryPlugin; -import org.elasticsearch.node.Node; -import org.elasticsearch.test.ESTestCase; - public class Ec2DiscoveryPluginTests extends ESTestCase { private Settings getNodeAttributes(Settings settings, String url) { @@ -106,6 +108,10 @@ public void testClientSettingsReInit() throws IOException { final MockSecureSettings mockSecure1 = new MockSecureSettings(); mockSecure1.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_1"); mockSecure1.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_1"); + final boolean mockSecure1HasSessionToken = randomBoolean(); + if (mockSecure1HasSessionToken) { + mockSecure1.setString(Ec2ClientSettings.SESSION_TOKEN_SETTING.getKey(), "ec2_session_token_1"); + } mockSecure1.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_1"); mockSecure1.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_1"); final Settings settings1 = Settings.builder() @@ -117,6 +123,10 @@ public void testClientSettingsReInit() throws IOException { final MockSecureSettings mockSecure2 = new MockSecureSettings(); mockSecure2.setString(Ec2ClientSettings.ACCESS_KEY_SETTING.getKey(), "ec2_access_2"); mockSecure2.setString(Ec2ClientSettings.SECRET_KEY_SETTING.getKey(), "ec2_secret_2"); + final boolean mockSecure2HasSessionToken = randomBoolean(); + if (mockSecure2HasSessionToken) { + mockSecure2.setString(Ec2ClientSettings.SESSION_TOKEN_SETTING.getKey(), "ec2_session_token_2"); + } mockSecure2.setString(Ec2ClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_2"); mockSecure2.setString(Ec2ClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_2"); final Settings settings2 = Settings.builder() @@ -127,27 +137,50 @@ public void testClientSettingsReInit() throws IOException { .build(); try (Ec2DiscoveryPluginMock plugin = new Ec2DiscoveryPluginMock(settings1)) { try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) { - assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); - assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + { + final AWSCredentials credentials = ((AmazonEC2Mock) clientReference.client()).credentials.getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("ec2_access_1")); + assertThat(credentials.getAWSSecretKey(), is("ec2_secret_1")); + if (mockSecure1HasSessionToken) { + assertThat(credentials, instanceOf(BasicSessionCredentials.class)); 
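+                        // safe to downcast: with a session token configured, Ec2ClientSettings.loadCredentials builds BasicSessionCredentials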
+ assertThat(((BasicSessionCredentials)credentials).getSessionToken(), is("ec2_session_token_1")); + } else { + assertThat(credentials, instanceOf(BasicAWSCredentials.class)); + } + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + } // reload secure settings2 plugin.reload(settings2); // client is not released, it is still using the old settings - assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); - assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); - assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + { + final AWSCredentials credentials = ((AmazonEC2Mock) clientReference.client()).credentials.getCredentials(); + if (mockSecure1HasSessionToken) { + assertThat(credentials, instanceOf(BasicSessionCredentials.class)); + assertThat(((BasicSessionCredentials)credentials).getSessionToken(), is("ec2_session_token_1")); + } else { + assertThat(credentials, instanceOf(BasicAWSCredentials.class)); + } + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPassword(), is("proxy_password_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_1")); + assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyPort(), is(881)); + assertThat(((AmazonEC2Mock) clientReference.client()).endpoint, is("ec2_endpoint_1")); + } } try (AmazonEc2Reference clientReference = plugin.ec2Service.client()) { - assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSAccessKeyId(), is("ec2_access_2")); - assertThat(((AmazonEC2Mock) clientReference.client()).credentials.getCredentials().getAWSSecretKey(), is("ec2_secret_2")); + final AWSCredentials credentials = ((AmazonEC2Mock) clientReference.client()).credentials.getCredentials(); + assertThat(credentials.getAWSAccessKeyId(), is("ec2_access_2")); + assertThat(credentials.getAWSSecretKey(), is("ec2_secret_2")); + if (mockSecure2HasSessionToken) { + assertThat(credentials, instanceOf(BasicSessionCredentials.class)); + assertThat(((BasicSessionCredentials)credentials).getSessionToken(), is("ec2_session_token_2")); + } else { + assertThat(credentials, instanceOf(BasicAWSCredentials.class)); + } assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyUsername(), is("proxy_username_2")); assertThat(((AmazonEC2Mock) 
clientReference.client()).configuration.getProxyPassword(), is("proxy_password_2")); assertThat(((AmazonEC2Mock) clientReference.client()).configuration.getProxyHost(), is("proxy_host_2")); diff --git a/plugins/repository-s3/qa/amazon-s3/build.gradle b/plugins/repository-s3/qa/amazon-s3/build.gradle index dbbffdebded47..b6cc4a6de310d 100644 --- a/plugins/repository-s3/qa/amazon-s3/build.gradle +++ b/plugins/repository-s3/qa/amazon-s3/build.gradle @@ -31,47 +31,81 @@ integTestCluster { plugin ':plugins:repository-s3' } +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + boolean useFixture = false -String s3AccessKey = System.getenv("amazon_s3_access_key") -String s3SecretKey = System.getenv("amazon_s3_secret_key") -String s3Bucket = System.getenv("amazon_s3_bucket") -String s3BasePath = System.getenv("amazon_s3_base_path") +// We test against two repositories, one which uses the usual two-part "permanent" credentials and +// the other which uses three-part "temporary" or "session" credentials. + +String s3PermanentAccessKey = System.getenv("amazon_s3_access_key") +String s3PermanentSecretKey = System.getenv("amazon_s3_secret_key") +String s3PermanentBucket = System.getenv("amazon_s3_bucket") +String s3PermanentBasePath = System.getenv("amazon_s3_base_path") + +String s3TemporaryAccessKey = System.getenv("amazon_s3_access_key_temporary") +String s3TemporarySecretKey = System.getenv("amazon_s3_secret_key_temporary") +String s3TemporarySessionToken = System.getenv("amazon_s3_session_token_temporary") +String s3TemporaryBucket = System.getenv("amazon_s3_bucket_temporary") +String s3TemporaryBasePath = System.getenv("amazon_s3_base_path_temporary") + +// If all these variables are missing then we are testing against the internal fixture instead, which has the following +// credentials hard-coded in. 
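+// (A hedged sketch: to run against real S3 instead, one would export the variables read above before the
+// build, e.g. `export amazon_s3_access_key=...`, `export amazon_s3_secret_key=...`, `export amazon_s3_bucket=...`
+// and `export amazon_s3_base_path=...`, plus the `*_temporary` equivalents including a session token.
+// Otherwise, the hard-coded fixture credentials below are used.)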
+ +if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath + && !s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) { + + s3PermanentAccessKey = 's3_integration_test_permanent_access_key' + s3PermanentSecretKey = 's3_integration_test_permanent_secret_key' + s3PermanentBucket = 'permanent_bucket_test' + s3PermanentBasePath = 'integration_test' + + s3TemporaryAccessKey = 's3_integration_test_temporary_access_key' + s3TemporarySecretKey = 's3_integration_test_temporary_secret_key' + s3TemporaryBucket = 'temporary_bucket_test' + s3TemporaryBasePath = 'integration_test' + s3TemporarySessionToken = 's3_integration_test_temporary_session_token' -if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { - s3AccessKey = 's3_integration_test_access_key' - s3SecretKey = 's3_integration_test_secret_key' - s3Bucket = 'bucket_test' - s3BasePath = 'integration_test' useFixture = true } /** A task to start the AmazonS3Fixture which emulates a S3 service **/ task s3Fixture(type: AntFixture) { - dependsOn testClasses + dependsOn compileTestJava env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" executable = new File(project.runtimeJavaHome, 'bin/java') - args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3Bucket + args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3PermanentBucket, s3TemporaryBucket } Map expansions = [ - 'bucket': s3Bucket, - 'base_path': s3BasePath + 'permanent_bucket': s3PermanentBucket, + 'permanent_base_path': s3PermanentBasePath, + 'temporary_bucket': s3TemporaryBucket, + 'temporary_base_path': s3TemporaryBasePath ] - processTestResources { inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) } integTestCluster { - keystoreSetting 's3.client.integration_test.access_key', s3AccessKey - keystoreSetting 's3.client.integration_test.secret_key', s3SecretKey + keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey + + keystoreSetting 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey + keystoreSetting 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey + keystoreSetting 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken if (useFixture) { + println "Using internal test service to test the repository-s3 plugin" dependsOn s3Fixture /* Use a closure on the string to delay evaluation until tests are executed */ - setting 's3.client.integration_test.endpoint', "http://${-> s3Fixture.addressAndPort}" + setting 's3.client.integration_test_permanent.endpoint', "http://${-> s3Fixture.addressAndPort}" + setting 's3.client.integration_test_temporary.endpoint', "http://${-> s3Fixture.addressAndPort}" } else { println "Using an external service to test the repository-s3 plugin" } diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java index d1034aff48248..fcb208258aa03 100644 --- a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java @@ -52,13 +52,19 @@ public class AmazonS3Fixture extends 
AbstractHttpFixture { /** Request handlers for the requests made by the S3 client **/ private final PathTrie handlers; + private final String permanentBucketName; + private final String temporaryBucketName; /** * Creates a {@link AmazonS3Fixture} */ - private AmazonS3Fixture(final String workingDir, final String bucket) { + private AmazonS3Fixture(final String workingDir, final String permanentBucketName, final String temporaryBucketName) { super(workingDir); - this.buckets.put(bucket, new Bucket(bucket)); + this.permanentBucketName = permanentBucketName; + this.temporaryBucketName = temporaryBucketName; + + this.buckets.put(permanentBucketName, new Bucket(permanentBucketName)); + this.buckets.put(temporaryBucketName, new Bucket(temporaryBucketName)); this.handlers = defaultHandlers(buckets); } @@ -67,21 +73,47 @@ protected Response handle(final Request request) throws IOException { final RequestHandler handler = handlers.retrieve(request.getMethod() + " " + request.getPath(), request.getParameters()); if (handler != null) { final String authorization = request.getHeader("Authorization"); - if (authorization == null - || (authorization.length() > 0 && authorization.contains("s3_integration_test_access_key") == false)) { - return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Access Denied", ""); + final String permittedBucket; + if (authorization.contains("s3_integration_test_permanent_access_key")) { + final String sessionToken = request.getHeader("x-amz-security-token"); + if (sessionToken != null) { + return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Unexpected session token", ""); + } + permittedBucket = permanentBucketName; + } else if (authorization.contains("s3_integration_test_temporary_access_key")) { + final String sessionToken = request.getHeader("x-amz-security-token"); + if (sessionToken == null) { + return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "No session token", ""); + } + if (sessionToken.equals("s3_integration_test_temporary_session_token") == false) { + return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad session token", ""); + } + permittedBucket = temporaryBucketName; + } else { + return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad access key", ""); + } + + if (handler != null) { + final String bucket = request.getParam("bucket"); + if (bucket != null && permittedBucket.equals(bucket) == false) { + // allow a null bucket to support bucket-free APIs + return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad bucket", ""); + } + return handler.handle(request); + } else { + return newInternalError(request.getId(), "No handler defined for request [" + request + "]"); } - return handler.handle(request); } return null; } public static void main(final String[] args) throws Exception { - if (args == null || args.length != 2) { - throw new IllegalArgumentException("AmazonS3Fixture "); + if (args == null || args.length != 3) { + throw new IllegalArgumentException( + "AmazonS3Fixture "); } - final AmazonS3Fixture fixture = new AmazonS3Fixture(args[0], args[1]); + final AmazonS3Fixture fixture = new AmazonS3Fixture(args[0], args[1], args[2]); fixture.listen(); } diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository_permanent_credentials.yml similarity index 73% rename from 
plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml rename to plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository_permanent_credentials.yml index 56e2b2cb8fa16..bb934d0931ca9 100644 --- a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository.yml +++ b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository_permanent_credentials.yml @@ -3,34 +3,35 @@ --- setup: - # Register repository + # Register repository with permanent credentials - do: snapshot.create_repository: - repository: repository + repository: repository_permanent body: type: s3 settings: - bucket: ${bucket} - client: integration_test - base_path: ${base_path} + bucket: ${permanent_bucket} + client: integration_test_permanent + base_path: ${permanent_base_path} canned_acl: private storage_class: standard --- -"Snapshot/Restore with repository-s3": +"Snapshot/Restore with repository-s3 using permanent credentials": # Get repository - do: snapshot.get_repository: - repository: repository + repository: repository_permanent - - match: { repository.settings.bucket : ${bucket} } - - match: { repository.settings.client : "integration_test" } - - match: { repository.settings.base_path : ${base_path} } - - match: { repository.settings.canned_acl : "private" } - - match: { repository.settings.storage_class : "standard" } - - is_false: repository.settings.access_key - - is_false: repository.settings.secret_key + - match: { repository_permanent.settings.bucket : ${permanent_bucket} } + - match: { repository_permanent.settings.client : "integration_test_permanent" } + - match: { repository_permanent.settings.base_path : ${permanent_base_path} } + - match: { repository_permanent.settings.canned_acl : "private" } + - match: { repository_permanent.settings.storage_class : "standard" } + - is_false: repository_permanent.settings.access_key + - is_false: repository_permanent.settings.secret_key + - is_false: repository_permanent.settings.session_token # Index documents - do: @@ -62,7 +63,7 @@ setup: # Create a first snapshot - do: snapshot.create: - repository: repository + repository: repository_permanent snapshot: snapshot-one wait_for_completion: true @@ -73,7 +74,7 @@ setup: - do: snapshot.status: - repository: repository + repository: repository_permanent snapshot: snapshot-one - is_true: snapshots @@ -115,7 +116,7 @@ setup: # Create a second snapshot - do: snapshot.create: - repository: repository + repository: repository_permanent snapshot: snapshot-two wait_for_completion: true @@ -125,7 +126,7 @@ setup: - do: snapshot.get: - repository: repository + repository: repository_permanent snapshot: snapshot-one,snapshot-two - is_true: snapshots @@ -140,7 +141,7 @@ setup: # Restore the second snapshot - do: snapshot.restore: - repository: repository + repository: repository_permanent snapshot: snapshot-two wait_for_completion: true @@ -158,7 +159,7 @@ setup: # Restore the first snapshot - do: snapshot.restore: - repository: repository + repository: repository_permanent snapshot: snapshot-one wait_for_completion: true @@ -171,12 +172,12 @@ setup: # Remove the snapshots - do: snapshot.delete: - repository: repository + repository: repository_permanent snapshot: snapshot-two - do: snapshot.delete: - repository: repository + repository: repository_permanent snapshot: snapshot-one --- @@ -185,7 +186,7 @@ setup: - do: catch: /repository_exception/ 
snapshot.create_repository: - repository: repository + repository: repository_permanent body: type: s3 settings: @@ -198,11 +199,11 @@ setup: - do: catch: /repository_exception/ snapshot.create_repository: - repository: repository + repository: repository_permanent body: type: s3 settings: - bucket: repository + bucket: repository_permanent client: unknown --- @@ -211,7 +212,7 @@ setup: - do: catch: /snapshot_missing_exception/ snapshot.get: - repository: repository + repository: repository_permanent snapshot: missing --- @@ -220,7 +221,7 @@ setup: - do: catch: /snapshot_missing_exception/ snapshot.delete: - repository: repository + repository: repository_permanent snapshot: missing --- @@ -229,7 +230,7 @@ setup: - do: catch: /snapshot_restore_exception/ snapshot.restore: - repository: repository + repository: repository_permanent snapshot: missing wait_for_completion: true @@ -239,4 +240,4 @@ teardown: # Remove our repository - do: snapshot.delete_repository: - repository: repository + repository: repository_permanent diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml new file mode 100644 index 0000000000000..5da4f739cd522 --- /dev/null +++ b/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml @@ -0,0 +1,243 @@ +# Integration tests for repository-s3 + +--- +setup: + + # Register repository with temporary credentials + - do: + snapshot.create_repository: + repository: repository_temporary + body: + type: s3 + settings: + bucket: ${temporary_bucket} + client: integration_test_temporary + base_path: ${temporary_base_path} + canned_acl: private + storage_class: standard + +--- +"Snapshot/Restore with repository-s3 using temporary credentials": + + # Get repository + - do: + snapshot.get_repository: + repository: repository_temporary + + - match: { repository_temporary.settings.bucket : ${temporary_bucket} } + - match: { repository_temporary.settings.client : "integration_test_temporary" } + - match: { repository_temporary.settings.base_path : ${temporary_base_path} } + - match: { repository_temporary.settings.canned_acl : "private" } + - match: { repository_temporary.settings.storage_class : "standard" } + - is_false: repository_temporary.settings.access_key + - is_false: repository_temporary.settings.secret_key + - is_false: repository_temporary.settings.session_token + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository_temporary + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository_temporary + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: 
doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository_temporary + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository_temporary + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository_temporary + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository_temporary + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository_temporary + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository_temporary + snapshot: snapshot-one + +--- +"Register a repository with a non existing bucket": + + - do: + catch: /repository_exception/ + snapshot.create_repository: + repository: repository_temporary + body: + type: s3 + settings: + bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test + +--- +"Register a repository with a non existing client": + + - do: + catch: /repository_exception/ + snapshot.create_repository: + repository: repository_temporary + body: + type: s3 + settings: + bucket: repository_temporary + client: unknown + +--- +"Get a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.get: + repository: repository_temporary + snapshot: missing + +--- +"Delete a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.delete: + repository: repository_temporary + snapshot: missing + +--- +"Restore a non existing snapshot": + + - do: + catch: /snapshot_restore_exception/ + snapshot.restore: + repository: repository_temporary + snapshot: missing + wait_for_completion: true + +--- +teardown: + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository_temporary diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index ef6088fe154bf..795304541be35 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -26,8 +26,10 @@ import java.util.Set; import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; +import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.auth.BasicSessionCredentials; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; @@ -52,6 
+54,10 @@ final class S3ClientSettings {
     static final Setting.AffixSetting<SecureString> SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key",
         key -> SecureSetting.secureString(key, null));
 
+    /** The session token for connecting to s3. */
+    static final Setting.AffixSetting<SecureString> SESSION_TOKEN_SETTING = Setting.affixKeySetting(PREFIX, "session_token",
+        key -> SecureSetting.secureString(key, null));
+
     /** An override for the s3 endpoint to connect to. */
     static final Setting.AffixSetting<String> ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint",
         key -> new Setting<>(key, "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope));
@@ -89,7 +95,7 @@ final class S3ClientSettings {
         key -> Setting.boolSetting(key, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope));
 
     /** Credentials to authenticate with s3. */
-    final BasicAWSCredentials credentials;
+    final AWSCredentials credentials;
 
     /** The s3 endpoint the client should talk to, or empty string to use the default. */
     final String endpoint;
@@ -120,7 +126,7 @@ final class S3ClientSettings {
     /** Whether the s3 client should use an exponential backoff retry policy. */
     final boolean throttleRetries;
 
-    protected S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol,
+    protected S3ClientSettings(AWSCredentials credentials, String endpoint, Protocol protocol,
                    String proxyHost, int proxyPort, String proxyUsername, String proxyPassword,
                    int readTimeoutMillis, int maxRetries, boolean throttleRetries) {
         this.credentials = credentials;
@@ -190,26 +196,36 @@ static BasicAWSCredentials loadDeprecatedCredentials(Settings repositorySettings
         }
     }
 
-    static BasicAWSCredentials loadCredentials(Settings settings, String clientName) {
+    static AWSCredentials loadCredentials(Settings settings, String clientName) {
         try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING);
-             SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING);) {
+             SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING);
+             SecureString sessionToken = getConfigValue(settings, clientName, SESSION_TOKEN_SETTING)) {
             if (accessKey.length() != 0) {
                 if (secretKey.length() != 0) {
-                    return new BasicAWSCredentials(accessKey.toString(), secretKey.toString());
+                    if (sessionToken.length() != 0) {
+                        return new BasicSessionCredentials(accessKey.toString(), secretKey.toString(), sessionToken.toString());
+                    } else {
+                        return new BasicAWSCredentials(accessKey.toString(), secretKey.toString());
+                    }
                 } else {
                     throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]");
                 }
-            } else if (secretKey.length() != 0) {
-                throw new IllegalArgumentException("Missing access key for s3 client [" + clientName + "]");
+            } else {
+                if (secretKey.length() != 0) {
+                    throw new IllegalArgumentException("Missing access key for s3 client [" + clientName + "]");
+                }
+                if (sessionToken.length() != 0) {
+                    throw new IllegalArgumentException("Missing access key and secret key for s3 client [" + clientName + "]");
+                }
+                return null;
             }
-            return null;
         }
     }
 
     // pkg private for tests
     /** Parse settings for a single client.
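     * Credentials are resolved from the secure settings in one pass: an access key plus a
     * secret key produce BasicAWSCredentials, an additional session token upgrades them to
     * BasicSessionCredentials, and when no key is configured at all the credentials field is
     * left null, in which case the S3 service falls back to instance profile credentials.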
*/ static S3ClientSettings getClientSettings(Settings settings, String clientName) { - final BasicAWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName); + final AWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName); try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { return new S3ClientSettings( diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 6a605319114fe..79a5187059f38 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -92,6 +92,7 @@ public List> getSettings() { // named s3 client configuration settings S3ClientSettings.ACCESS_KEY_SETTING, S3ClientSettings.SECRET_KEY_SETTING, + S3ClientSettings.SESSION_TOKEN_SETTING, S3ClientSettings.ENDPOINT_SETTING, S3ClientSettings.PROTOCOL_SETTING, S3ClientSettings.PROXY_HOST_SETTING, diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index b59f740f2048d..91a7a30024b78 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -22,7 +22,6 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.InstanceProfileCredentialsProvider; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.internal.StaticCredentialsProvider; @@ -134,7 +133,7 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { // pkg private for tests static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { - final BasicAWSCredentials credentials = clientSettings.credentials; + final AWSCredentials credentials = clientSettings.credentials; if (credentials == null) { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java new file mode 100644 index 0000000000000..e629f43f8a3d3 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.auth.BasicSessionCredentials; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isEmptyString; +import static org.hamcrest.Matchers.nullValue; + +public class S3ClientSettingsTests extends ESTestCase { + public void testThereIsADefaultClientByDefault() { + final Map settings = S3ClientSettings.load(Settings.EMPTY); + assertThat(settings.keySet(), contains("default")); + + final S3ClientSettings defaultSettings = settings.get("default"); + assertThat(defaultSettings.credentials, nullValue()); + assertThat(defaultSettings.endpoint, isEmptyString()); + assertThat(defaultSettings.protocol, is(Protocol.HTTPS)); + assertThat(defaultSettings.proxyHost, isEmptyString()); + assertThat(defaultSettings.proxyPort, is(80)); + assertThat(defaultSettings.proxyUsername, isEmptyString()); + assertThat(defaultSettings.proxyPassword, isEmptyString()); + assertThat(defaultSettings.readTimeoutMillis, is(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT)); + assertThat(defaultSettings.maxRetries, is(ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry())); + assertThat(defaultSettings.throttleRetries, is(ClientConfiguration.DEFAULT_THROTTLE_RETRIES)); + } + + public void testDefaultClientSettingsCanBeSet() { + final Map settings = S3ClientSettings.load(Settings.builder() + .put("s3.client.default.max_retries", 10).build()); + assertThat(settings.keySet(), contains("default")); + + final S3ClientSettings defaultSettings = settings.get("default"); + assertThat(defaultSettings.maxRetries, is(10)); + } + + public void testNondefaultClientCreatedBySettingItsSettings() { + final Map settings = S3ClientSettings.load(Settings.builder() + .put("s3.client.another_client.max_retries", 10).build()); + assertThat(settings.keySet(), contains("default", "another_client")); + + final S3ClientSettings defaultSettings = settings.get("default"); + assertThat(defaultSettings.maxRetries, is(ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry())); + + final S3ClientSettings anotherClientSettings = settings.get("another_client"); + assertThat(anotherClientSettings.maxRetries, is(10)); + } + + public void testRejectionOfLoneAccessKey() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", "aws_key"); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build())); + assertThat(e.getMessage(), is("Missing secret key for s3 client [default]")); + } + + public void testRejectionOfLoneSecretKey() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + 
secureSettings.setString("s3.client.default.secret_key", "aws_key");
+        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()));
+        assertThat(e.getMessage(), is("Missing access key for s3 client [default]"));
+    }
+
+    public void testRejectionOfLoneSessionToken() {
+        final MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString("s3.client.default.session_token", "aws_key");
+        final IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()));
+        assertThat(e.getMessage(), is("Missing access key and secret key for s3 client [default]"));
+    }
+
+    public void testCredentialsTypeWithAccessKeyAndSecretKey() {
+        final MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString("s3.client.default.access_key", "access_key");
+        secureSettings.setString("s3.client.default.secret_key", "secret_key");
+        final Map<String, S3ClientSettings> settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build());
+        final S3ClientSettings defaultSettings = settings.get("default");
+        BasicAWSCredentials credentials = (BasicAWSCredentials) defaultSettings.credentials;
+        assertThat(credentials.getAWSAccessKeyId(), is("access_key"));
+        assertThat(credentials.getAWSSecretKey(), is("secret_key"));
+    }
+
+    public void testCredentialsTypeWithAccessKeyAndSecretKeyAndSessionToken() {
+        final MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString("s3.client.default.access_key", "access_key");
+        secureSettings.setString("s3.client.default.secret_key", "secret_key");
+        secureSettings.setString("s3.client.default.session_token", "session_token");
+        final Map<String, S3ClientSettings> settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build());
+        final S3ClientSettings defaultSettings = settings.get("default");
+        BasicSessionCredentials credentials = (BasicSessionCredentials) defaultSettings.credentials;
+        assertThat(credentials.getAWSAccessKeyId(), is("access_key"));
+        assertThat(credentials.getAWSSecretKey(), is("secret_key"));
+        assertThat(credentials.getSessionToken(), is("session_token"));
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsException.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsException.java
index f7d4843c1c03e..ad5f56d7fc0f3 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/SettingsException.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsException.java
@@ -42,4 +42,8 @@ public SettingsException(String message, Throwable cause) {
     public SettingsException(StreamInput in) throws IOException {
         super(in);
     }
+
+    public SettingsException(String msg, Object... args) {
+        super(msg, args);
+    }
 }

From de9e56aa0161abb30047e7d96b814ad250b69165 Mon Sep 17 00:00:00 2001
From: Costin Leau
Date: Tue, 3 Jul 2018 16:56:31 +0300
Subject: [PATCH 19/36] DOC: Add examples to the SQL docs (#31633)

Significantly improve the example snippets in the documentation.
The examples are part of the test suite and checked nightly.

To help readability, the existing dataset was extended (test_emp renamed
to emp plus library).

Improve output of JDBC tests to be consistent with the CLI

Add lenient flag to JDBC asserts to allow type widening (a long is
equivalent to an integer as long as the value is the same).
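To make the widening rule concrete, here is a minimal, self-contained Java sketch of
the idea (illustrative only, not part of the patch: the `widen` helper and the
`LenientWideningSketch` class are hypothetical names; the actual implementation is the
`typeOf` method added to `JdbcAssert` below):

["source","java"]
----
import java.sql.Types;

// Sketch of the lenient type widening: all integer JDBC types collapse to
// BIGINT and all floating-point types to REAL before the comparison, so an
// INTEGER and a BIGINT column count as the same type when the values match.
public class LenientWideningSketch {

    static int widen(int jdbcType) {
        switch (jdbcType) {
            case Types.TINYINT:
            case Types.SMALLINT:
            case Types.INTEGER:
            case Types.BIGINT:
                return Types.BIGINT;
            case Types.FLOAT:
            case Types.REAL:
            case Types.DOUBLE:
                return Types.REAL;
            default:
                // non-numeric types are compared strictly
                return jdbcType;
        }
    }

    public static void main(String[] args) {
        System.out.println(widen(Types.INTEGER) == widen(Types.BIGINT)); // true
        System.out.println(widen(Types.FLOAT) == widen(Types.DOUBLE));   // true
        System.out.println(widen(Types.VARCHAR) == widen(Types.BIGINT)); // false
    }
}
----

Run directly, `main` prints `true`, `true`, `false`: the numeric families are unified
while non-numeric types such as VARCHAR are still compared strictly.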
---
 .../language/syntax/describe-table.asciidoc | 5 +
 .../sql/language/syntax/select.asciidoc | 249 ++++---
 .../sql/language/syntax/show-columns.asciidoc | 5 +
 .../language/syntax/show-functions.asciidoc | 31 +
 .../sql/language/syntax/show-tables.asciidoc | 34 +-
 .../xpack/sql/jdbc/jdbc/TypeConverter.java | 10 +-
 x-pack/qa/sql/build.gradle | 3 +
 .../qa/sql/nosecurity/JdbcDocCsvSpectIT.java | 90 +++
 .../xpack/qa/sql/jdbc/CsvSpecTestCase.java | 12 +-
 .../xpack/qa/sql/jdbc/CsvTestUtils.java | 6 +-
 .../xpack/qa/sql/jdbc/DataLoader.java | 65 +-
 .../xpack/qa/sql/jdbc/DebugCsvSpec.java | 10 -
 .../xpack/qa/sql/jdbc/JdbcAssert.java | 89 ++-
 .../xpack/qa/sql/jdbc/JdbcTestUtils.java | 38 ++
 .../sql/jdbc/SpecBaseIntegrationTestCase.java | 23 +-
 .../xpack/qa/sql/jdbc/SqlSpecTestCase.java | 10 -
 .../qa/sql/src/main/resources/docs.csv-spec | 639 ++++++++++++++++++
 x-pack/qa/sql/src/main/resources/library.csv | 25 +
 .../qa/sql/src/main/resources/select.sql-spec | 2 -
 19 files changed, 1200 insertions(+), 146 deletions(-)
 create mode 100644 x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpectIT.java
 create mode 100644 x-pack/qa/sql/src/main/resources/docs.csv-spec
 create mode 100644 x-pack/qa/sql/src/main/resources/library.csv

diff --git a/docs/reference/sql/language/syntax/describe-table.asciidoc b/docs/reference/sql/language/syntax/describe-table.asciidoc
index dd2d27a5781d2..396be25bb5170 100644
--- a/docs/reference/sql/language/syntax/describe-table.asciidoc
+++ b/docs/reference/sql/language/syntax/describe-table.asciidoc
@@ -20,3 +20,8 @@ DESC table
 .Description
 
 `DESC` and `DESCRIBE` are aliases to <>.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[describeTable]
+----
diff --git a/docs/reference/sql/language/syntax/select.asciidoc b/docs/reference/sql/language/syntax/select.asciidoc
index 4a7c0534b68a3..ad3b564bb00d0 100644
--- a/docs/reference/sql/language/syntax/select.asciidoc
+++ b/docs/reference/sql/language/syntax/select.asciidoc
@@ -36,23 +36,26 @@ The general execution of `SELECT` is as follows:
 
 As with a table, every output column of a `SELECT` has a name which can be either specified per column through the `AS` keyword :
 
-[source,sql]
+["source","sql",subs="attributes,callouts,macros"]
 ----
-SELECT column AS c
+include-tagged::{sql-specs}/docs.csv-spec[selectColumnAlias]
 ----
 
+Note: `AS` is an optional keyword, however it helps with readability and, in some cases, with the disambiguation of the query,
+which is why it is recommended to specify it.
+
 assigned by {es-sql} if no name is given:
 
-[source,sql]
+["source","sql",subs="attributes,callouts,macros"]
 ----
-SELECT 1 + 1
+include-tagged::{sql-specs}/docs.csv-spec[selectInline]
 ----
 
 or if it's a simple column reference, use its name as the column name:
 
-[source,sql]
+["source","sql",subs="attributes,callouts,macros"]
 ----
-SELECT col FROM table
+include-tagged::{sql-specs}/docs.csv-spec[selectColumn]
 ----
 
 [[sql-syntax-select-wildcard]]
 [float]
 
 To select all the columns in the source, one can use `*`:
 
 ["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{sql-specs}/select.sql-spec[wildcardWithOrder]
--------------------------------------------------
+----
+include-tagged::{sql-specs}/docs.csv-spec[wildcardWithOrder]
+----
 
-which essentially returns all columsn found.
+which essentially returns all columns found (top-level fields; sub-fields, such as multi-fields, are ignored).
 
 [[sql-syntax-from]]
 [float]
@@ -83,17 +86,30 @@ where:
 
 `table_name`::
 
 Represents the name (optionally qualified) of an existing table, either a concrete or base one (actual index) or alias.
+
+
 If the table name contains special SQL characters (such as `.`,`-`,etc...) use double quotes to escape them:
-[source, sql]
+
+["source","sql",subs="attributes,callouts,macros"]
 ----
-SELECT ... FROM "some-table"
+include-tagged::{sql-specs}/docs.csv-spec[fromTableQuoted]
 ----
 
 The name can be a <> pointing to multiple indices (likely requiring quoting as mentioned above)
 with the restriction that *all* resolved concrete tables have **exact mapping**.
 
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[fromTablePatternQuoted]
+----
+
 `alias`::
 
 A substitute name for the `FROM` item containing the alias. An alias is used for brevity or to eliminate ambiguity. When an alias is provided, it completely hides the actual name of the table and must be used in its place.
 
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[fromTableAlias]
+----
+
 [[sql-syntax-where]]
 [float]
 ==== WHERE Clause
@@ -111,6 +127,11 @@ where:
 
 Represents an expression that evaluates to a `boolean`. Only the rows that match the condition (to `true`) are returned.
 
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[basicWhere]
+----
+
 [[sql-syntax-group-by]]
 [float]
 ==== GROUP BY
@@ -126,10 +147,80 @@ where:
 
 `grouping_element`::
 
-Represents an expression on which rows are being grouped _on_. It can be a column name, name or ordinal number of a column or an arbitrary expression of column values.
+Represents an expression that rows are being grouped _on_. It can be a column name, an alias, the ordinal number of a column or an arbitrary expression of column values.
+
+A common case is grouping by column name:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByColumn]
+----
+
+Grouping by output ordinal:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByOrdinal]
+----
+
+Grouping by alias:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByAlias]
+----
+
+And grouping by column expression (typically used alongside an alias):
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByExpression]
+----
 
 When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be either aggregate functions or expressions used for grouping or derivates of (otherwise there would be more than one possible value to return for each ungrouped column).
+To wit:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByAndAgg]
+----
+
+Expressions over aggregates used in output:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByAndAggExpression]
+----
+
+Multiple aggregates used:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByAndMultipleAggs]
+----
+
+[[sql-syntax-group-by-implicit]]
+[float]
+===== Implicit Grouping
+
+When an aggregation is used without an associated `GROUP BY`, an __implicit grouping__ is applied, meaning all selected rows are considered to form a single default, or implicit group.
+As such, the query emits only a single row (as there is only a single group).
+
+A common example is counting the number of records:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByImplicitCount]
+----
+
+Of course, multiple aggregations can be applied:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByImplicitMultipleAggs]
+----
+
 [[sql-syntax-having]]
 [float]
 ==== HAVING
@@ -147,13 +238,44 @@ where:
 
 Represents an expression that evaluates to a `boolean`. Only groups that match the condition (to `true`) are returned.
 
-Both `WHERE` and `HAVING` are used for filtering however there are several differences between them:
+Both `WHERE` and `HAVING` are used for filtering, however there are several significant differences between them:
 
 . `WHERE` works on individual *rows*, `HAVING` works on the *groups* created by ``GROUP BY``
 . `WHERE` is evaluated *before* grouping, `HAVING` is evaluated *after* grouping
 
-Note that it is possible to have a `HAVING` clause without a ``GROUP BY``. In this case, an __implicit grouping__ is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group. `
-As such a query emits only a single row (as there is only a single group), `HAVING` condition returns either one row (the group) or zero if the condition fails.
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByHaving]
+----
+
+Furthermore, one can use multiple aggregate expressions inside `HAVING`, even ones that are not used in the output (`SELECT`):
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[groupByHavingMultiple]
+----
+
+[[sql-syntax-having-group-by-implicit]]
+[float]
+===== Implicit Grouping
+
+As indicated above, it is possible to have a `HAVING` clause without a ``GROUP BY``. In this case, the so-called <> is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group.
+As such, the query emits only a single row (as there is only a single group) and the `HAVING` condition returns either one row (the group) or zero rows if the condition fails.
+ +In this example, `HAVING` matches: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[groupByHavingImplicitMatch] +---- + +//However `HAVING` can also not match, in which case an empty result is returned: +// +//["source","sql",subs="attributes,callouts,macros"] +//---- +//include-tagged::{sql-specs}/docs.csv-spec[groupByHavingImplicitNoMatch] +//---- + [[sql-syntax-order-by]] [float] @@ -178,30 +300,10 @@ IMPORTANT: When used along-side, `GROUP BY` expression can point _only_ to the c For example, the following query sorts by an arbitrary input field (`page_count`): -[source,js] --------------------------------------------------- -POST /_xpack/sql?format=txt -{ - "query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5" -} --------------------------------------------------- -// CONSOLE -// TEST[setup:library] - -which results in something like: - -[source,text] --------------------------------------------------- - author | name | page_count | release_date ------------------+--------------------+---------------+------------------------ -Peter F. Hamilton|Pandora's Star |768 |2004-03-02T00:00:00.000Z -Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00.000Z -Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z -Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00.000Z -James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z --------------------------------------------------- -// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/] -// TESTRESPONSE[_cat] +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[orderByBasic] +---- [[sql-syntax-order-by-score]] ==== Order By Score @@ -215,54 +317,18 @@ combined using the same rules as {es}'s To sort based on the `score`, use the special function `SCORE()`: -[source,js] --------------------------------------------------- -POST /_xpack/sql?format=txt -{ - "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC" -} --------------------------------------------------- -// CONSOLE -// TEST[setup:library] - -Which results in something like: - -[source,text] --------------------------------------------------- - SCORE() | author | name | page_count | release_date ----------------+---------------+-------------------+---------------+------------------------ -2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z -1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z -1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z -1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z --------------------------------------------------- -// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/] -// TESTRESPONSE[_cat] - -Note that you can return `SCORE()` by adding it to the where clause. 
This
-is possible even if you are not sorting by `SCORE()`:
-
-[source,js]
--------------------------------------------------
-POST /_xpack/sql?format=txt
-{
-    "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DESC"
-}
--------------------------------------------------
-// CONSOLE
-// TEST[setup:library]
-
-[source,text]
--------------------------------------------------
-    SCORE()    |    author     |       name        |  page_count   |      release_date
----------------+---------------+-------------------+---------------+------------------------
-2.288635       |Frank Herbert  |Dune               |604            |1965-06-01T00:00:00.000Z
-1.4005898      |Frank Herbert  |God Emperor of Dune|454            |1981-05-28T00:00:00.000Z
-1.6086555      |Frank Herbert  |Children of Dune   |408            |1976-04-21T00:00:00.000Z
-1.8893257      |Frank Herbert  |Dune Messiah       |331            |1969-10-15T00:00:00.000Z
--------------------------------------------------
-// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/]
-// TESTRESPONSE[_cat]
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[orderByScore]
+----
+
+Note that you can return `SCORE()` by using a full-text search predicate in the `WHERE` clause.
+This is possible even if `SCORE()` is not used for sorting:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[orderByScoreWithMatch]
+----
 
 NOTE: Trying to return `score` from a non full-text queries will return the same value for all results, as
@@ -284,3 +350,10 @@ where
 
 count:: is a positive integer or zero indicating the maximum *possible* number of results being returned (as there might be less matches than the limit). If `0` is specified, no results are returned.
 
 ALL:: indicates there is no limit and thus all results are being returned.
+
+To return only a limited number of results, for example:
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[limitBasic]
+----
\ No newline at end of file
diff --git a/docs/reference/sql/language/syntax/show-columns.asciidoc b/docs/reference/sql/language/syntax/show-columns.asciidoc
index a52c744f17a97..539c35c57952a 100644
--- a/docs/reference/sql/language/syntax/show-columns.asciidoc
+++ b/docs/reference/sql/language/syntax/show-columns.asciidoc
@@ -12,3 +12,8 @@ SHOW COLUMNS [ FROM | IN ] ? table
 .Description
 
 List the columns in table and their data type (and other attributes).
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[showColumns]
+----
diff --git a/docs/reference/sql/language/syntax/show-functions.asciidoc b/docs/reference/sql/language/syntax/show-functions.asciidoc
index 964cdf39081c6..1e4220ef5295c 100644
--- a/docs/reference/sql/language/syntax/show-functions.asciidoc
+++ b/docs/reference/sql/language/syntax/show-functions.asciidoc
@@ -14,3 +14,34 @@ SHOW FUNCTIONS [ LIKE? pattern<1>? ]?
 .Description
 
 List all the SQL functions and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern.
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[showFunctions]
+----
+
+The list of functions returned can be customized based on the pattern.
+ +It can be an exact match: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeExact] +---- + +A wildcard for exactly one character: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeChar] +---- + +A wildcard matching zero or more characters: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeWildcard] +---- + +Or of course, a variation of the above: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showFunctionsWithPattern] +---- diff --git a/docs/reference/sql/language/syntax/show-tables.asciidoc b/docs/reference/sql/language/syntax/show-tables.asciidoc index 7772c39c6fc21..b401e9f7d900a 100644 --- a/docs/reference/sql/language/syntax/show-tables.asciidoc +++ b/docs/reference/sql/language/syntax/show-tables.asciidoc @@ -13,4 +13,36 @@ SHOW TABLES [ LIKE? pattern<1>? ]? .Description -List the tables available to the current user and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern. +List the tables available to the current user and their type. + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showTables] +---- + +The `LIKE` clause can be used to restrict the list of names to the given pattern. + +The pattern can be an exact match: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeExact] +---- + +Multiple chars: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeWildcard] +---- + +A single char: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeOneChar] +---- + + +Or a mixture of single and multiple chars: +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeMixed] +---- diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java index 782a17257d424..aa9d434f332e3 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverter.java @@ -61,9 +61,9 @@ private TypeConverter() { static { Map, JDBCType> aMap = Arrays.stream(DataType.values()) - .filter(dataType -> dataType.javaClass() != null - && dataType != DataType.HALF_FLOAT - && dataType != DataType.SCALED_FLOAT + .filter(dataType -> dataType.javaClass() != null + && dataType != DataType.HALF_FLOAT + && dataType != DataType.SCALED_FLOAT && dataType != DataType.TEXT) .collect(Collectors.toMap(dataType -> dataType.javaClass(), dataType -> dataType.jdbcType)); // apart from the mappings in {@code DataType} three more Java classes can be mapped to a {@code JDBCType.TIMESTAMP} @@ -428,7 +428,7 @@ private static Float asFloat(Object val, JDBCType columnType) throws SQLExceptio case SMALLINT: case INTEGER: case BIGINT: - return Float.valueOf((float) ((Number) val).longValue()); + return Float.valueOf(((Number) val).longValue()); case REAL: case FLOAT: case DOUBLE: @@ -447,7 +447,7 @@ private static Double asDouble(Object val, JDBCType columnType) throws SQLExcept case 
SMALLINT:
         case INTEGER:
         case BIGINT:
-            return Double.valueOf((double) ((Number) val).longValue());
+            return Double.valueOf(((Number) val).longValue());
         case REAL:
         case FLOAT:
         case DOUBLE:
diff --git a/x-pack/qa/sql/build.gradle b/x-pack/qa/sql/build.gradle
index 4d061fffa9110..a56325dff2c35 100644
--- a/x-pack/qa/sql/build.gradle
+++ b/x-pack/qa/sql/build.gradle
@@ -10,6 +10,8 @@ dependencies {
 
   // JDBC testing dependencies
   compile project(path: xpackModule('sql:jdbc'), configuration: 'nodeps')
+
+  compile project(path: xpackModule('sql:sql-action'))
   compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34"
 
   // CLI testing dependencies
@@ -76,6 +78,7 @@ thirdPartyAudit.excludes = [
 subprojects {
   apply plugin: 'elasticsearch.standalone-rest-test'
   dependencies {
+
     /* Since we're a standalone rest test we actually get transitive
      * dependencies but we don't really want them because they cause
      * all kinds of trouble with the jar hell checks. So we suppress
diff --git a/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpectIT.java b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpectIT.java
new file mode 100644
index 0000000000000..24e8c170cc39d
--- /dev/null
+++ b/x-pack/qa/sql/no-security/src/test/java/org/elasticsearch/xpack/qa/sql/nosecurity/JdbcDocCsvSpectIT.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.qa.sql.nosecurity;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase;
+import org.elasticsearch.xpack.qa.sql.jdbc.DataLoader;
+import org.elasticsearch.xpack.qa.sql.jdbc.JdbcAssert;
+import org.elasticsearch.xpack.qa.sql.jdbc.SpecBaseIntegrationTestCase;
+import org.elasticsearch.xpack.qa.sql.jdbc.SqlSpecTestCase;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+
+import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection;
+import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery;
+import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.specParser;
+
+/**
+ * CSV test specification for DOC examples.
+ * While we could use the existing tests, their purpose is to test corner-cases, which
+ * get reflected in the dataset structure.
+ * The doc tests, while redundant, try to be expressive first and foremost, and sometimes
+ * the dataset isn't exactly convenient.
+ *
+ * Also, looking around for the tests across the test files isn't trivial.
+ *
+ * That's not to say the two cannot be merged, however that felt like too much of an effort
+ * at this stage and, to not keep things stalling, we started with this approach.
+ */ +public class JdbcDocCsvSpectIT extends SpecBaseIntegrationTestCase { + + private final CsvTestCase testCase; + + @Override + protected String indexName() { + return "library"; + } + + @Override + protected void loadDataset(RestClient client) throws Exception { + DataLoader.loadDocsDatasetIntoEs(client); + } + + @ParametersFactory(shuffle = false, argumentFormatting = SqlSpecTestCase.PARAM_FORMATTING) + public static List readScriptSpec() throws Exception { + Parser parser = specParser(); + return readScriptSpec("/docs.csv-spec", parser); + } + + public JdbcDocCsvSpectIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber); + this.testCase = testCase; + } + + @Override + protected void assertResults(ResultSet expected, ResultSet elastic) throws SQLException { + Logger log = logEsResultSet() ? logger : null; + + // + // uncomment this to printout the result set and create new CSV tests + // + //JdbcTestUtils.logLikeCLI(elastic, log); + JdbcAssert.assertResultSets(expected, elastic, log, true); + } + + @Override + protected boolean logEsResultSet() { + return true; + } + + @Override + protected final void doTest() throws Throwable { + try (Connection csv = csvConnection(testCase.expectedResults); Connection es = esJdbc()) { + + // pass the testName as table for debugging purposes (in case the underlying reader is missing) + ResultSet expected = executeCsvQuery(csv, testName); + ResultSet elasticResults = executeJdbcQuery(es, testCase.query); + assertResults(expected, elasticResults); + } + } +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java index e37688eb90465..99e8432370471 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvSpecTestCase.java @@ -6,14 +6,13 @@ package org.elasticsearch.xpack.qa.sql.jdbc; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase; -import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import java.sql.Connection; import java.sql.ResultSet; import java.util.ArrayList; import java.util.List; -import java.util.Properties; import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection; import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery; @@ -57,13 +56,4 @@ protected final void doTest() throws Throwable { assertResults(expected, elasticResults); } } - - // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention) - @Override - protected Properties connectionProperties() { - Properties connectionProperties = new Properties(); - connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); - return connectionProperties; - } - } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java index fbbc2285ed123..ad26db3104758 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/CsvTestUtils.java @@ -190,7 +190,7 @@ public Object parse(String line) { } public static class CsvTestCase { - String 
query; - String expectedResults; + public String query; + public String expectedResults; } -} +} \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java index 655f02d97b8ad..05140577bcdf6 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java @@ -32,18 +32,28 @@ public class DataLoader { public static void main(String[] args) throws Exception { try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) { - loadDatasetIntoEs(client); + loadEmpDatasetIntoEs(client); Loggers.getLogger(DataLoader.class).info("Data loaded"); } } protected static void loadDatasetIntoEs(RestClient client) throws Exception { - loadDatasetIntoEs(client, "test_emp"); - loadDatasetIntoEs(client, "test_emp_copy"); + loadEmpDatasetIntoEs(client); + } + + protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception { + loadEmpDatasetIntoEs(client, "test_emp"); + loadEmpDatasetIntoEs(client, "test_emp_copy"); makeAlias(client, "test_alias", "test_emp", "test_emp_copy"); makeAlias(client, "test_alias_emp", "test_emp", "test_emp_copy"); } + public static void loadDocsDatasetIntoEs(RestClient client) throws Exception { + loadEmpDatasetIntoEs(client, "emp"); + loadLibDatasetIntoEs(client, "library"); + makeAlias(client, "employees", "emp"); + } + private static void createString(String name, XContentBuilder builder) throws Exception { builder.startObject(name).field("type", "text") .startObject("fields") @@ -51,7 +61,8 @@ private static void createString(String name, XContentBuilder builder) throws Ex .endObject() .endObject(); } - protected static void loadDatasetIntoEs(RestClient client, String index) throws Exception { + + protected static void loadEmpDatasetIntoEs(RestClient client, String index) throws Exception { Request request = new Request("PUT", "/" + index); XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); createIndex.startObject("settings"); @@ -151,6 +162,52 @@ protected static void loadDatasetIntoEs(RestClient client, String index) throws client.performRequest(request); } + protected static void loadLibDatasetIntoEs(RestClient client, String index) throws Exception { + Request request = new Request("PUT", "/" + index); + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + createIndex.field("number_of_replicas", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("book"); + { + createIndex.startObject("properties"); + { + createString("name", createIndex); + createString("author", createIndex); + createIndex.startObject("release_date").field("type", "date").endObject(); + createIndex.startObject("page_count").field("type", "short").endObject(); + } + createIndex.endObject(); + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + request.setJsonEntity(Strings.toString(createIndex)); + client.performRequest(request); + + request = new Request("POST", "/" + index + "/book/_bulk"); + request.addParameter("refresh", "true"); + StringBuilder bulk = new StringBuilder(); + csvToLines("library", (titles, fields) -> { + bulk.append("{\"index\":{\"_id\":\"" + fields.get(0) + "\"}}\n"); + bulk.append("{"); 
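+            // every field value is emitted as a JSON string; the date and numeric
+            // fields of the library index are coerced by Elasticsearch at index time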
+            for (int f = 0; f < titles.size(); f++) {
+                if (f > 0) {
+                    bulk.append(",");
+                }
+                bulk.append('"').append(titles.get(f)).append("\":\"").append(fields.get(f)).append('"');
+            }
+            bulk.append("}\n");
+        });
+        request.setJsonEntity(bulk.toString());
+        client.performRequest(request);
+    }
+
     protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception {
         for (String index : indices) {
             client.performRequest(new Request("POST", "/" + index + "/_alias/" + aliasName));
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java
index c0d3db026d8bd..c4d25f4311327 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DebugCsvSpec.java
@@ -10,13 +10,11 @@
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.CsvTestCase;
-import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration;
 
 import java.sql.Connection;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.List;
-import java.util.Properties;
 
 import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.csvConnection;
 import static org.elasticsearch.xpack.qa.sql.jdbc.CsvTestUtils.executeCsvQuery;
@@ -65,12 +63,4 @@ protected final void doTest() throws Throwable {
             assertResults(expected, elasticResults);
         }
     }
-
-    // make sure ES uses UTC (otherwise JDBC driver picks up the JVM timezone per spec/convention)
-    @Override
-    protected Properties connectionProperties() {
-        Properties connectionProperties = new Properties();
-        connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC");
-        return connectionProperties;
-    }
 }
\ No newline at end of file
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java
index 801f40639fad1..47f531ebd1f9b 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcAssert.java
@@ -20,10 +20,20 @@
 import java.util.TimeZone;
 
 import static java.lang.String.format;
+import static java.sql.Types.BIGINT;
+import static java.sql.Types.DOUBLE;
+import static java.sql.Types.FLOAT;
+import static java.sql.Types.INTEGER;
+import static java.sql.Types.REAL;
+import static java.sql.Types.SMALLINT;
+import static java.sql.Types.TINYINT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+/**
+ * Utility class for doing JUnit-style asserts over JDBC.
+ */
 public class JdbcAssert {
     private static final Calendar UTC_CALENDAR = Calendar.getInstance(TimeZone.getTimeZone("UTC"), Locale.ROOT);
 
@@ -32,14 +42,29 @@ public static void assertResultSets(ResultSet expected, ResultSet actual) throws
     }
 
     public static void assertResultSets(ResultSet expected, ResultSet actual, Logger logger) throws SQLException {
+        assertResultSets(expected, actual, logger, false);
+    }
+
+    /**
+     * Assert the given result sets, potentially in a lenient way.
+     * When lenient is specified, the type comparison of a column is widened to reach a common, compatible ground.
+     * This means promoting integer types to long and floating types to double and comparing their values.
+ * For example in a non-lenient, strict case a comparison between an int and a tinyint would fail, with lenient it will succeed as + * long as the actual value is the same. + */ + public static void assertResultSets(ResultSet expected, ResultSet actual, Logger logger, boolean lenient) throws SQLException { try (ResultSet ex = expected; ResultSet ac = actual) { - assertResultSetMetadata(ex, ac, logger); - assertResultSetData(ex, ac, logger); + assertResultSetMetadata(ex, ac, logger, lenient); + assertResultSetData(ex, ac, logger, lenient); } } - // metadata doesn't consume a ResultSet thus it shouldn't close it public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + assertResultSetMetadata(expected, actual, logger, false); + } + + // metadata doesn't consume a ResultSet thus it shouldn't close it + public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, Logger logger, boolean lenient) throws SQLException { ResultSetMetaData expectedMeta = expected.getMetaData(); ResultSetMetaData actualMeta = actual.getMetaData(); @@ -81,8 +106,8 @@ public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, } // use the type not the name (timestamp with timezone returns spaces for example) - int expectedType = expectedMeta.getColumnType(column); - int actualType = actualMeta.getColumnType(column); + int expectedType = typeOf(expectedMeta.getColumnType(column), lenient); + int actualType = typeOf(actualMeta.getColumnType(column), lenient); // since H2 cannot use a fixed timezone, the data is stored in UTC (and thus with timezone) if (expectedType == Types.TIMESTAMP_WITH_TIMEZONE) { @@ -92,6 +117,7 @@ public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, if (expectedType == Types.FLOAT && expected instanceof CsvResultSet) { expectedType = Types.REAL; } + // when lenient is used, an int is equivalent to a short, etc... 
assertEquals("Different column type for column [" + expectedName + "] (" + JDBCType.valueOf(expectedType) + " != " + JDBCType.valueOf(actualType) + ")", expectedType, actualType); } @@ -99,12 +125,16 @@ public static void assertResultSetMetadata(ResultSet expected, ResultSet actual, // The ResultSet is consumed and thus it should be closed public static void assertResultSetData(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + assertResultSetData(expected, actual, logger, false); + } + + public static void assertResultSetData(ResultSet expected, ResultSet actual, Logger logger, boolean lenient) throws SQLException { try (ResultSet ex = expected; ResultSet ac = actual) { - doAssertResultSetData(ex, ac, logger); + doAssertResultSetData(ex, ac, logger, lenient); } } - private static void doAssertResultSetData(ResultSet expected, ResultSet actual, Logger logger) throws SQLException { + private static void doAssertResultSetData(ResultSet expected, ResultSet actual, Logger logger, boolean lenient) throws SQLException { ResultSetMetaData metaData = expected.getMetaData(); int columns = metaData.getColumnCount(); @@ -118,10 +148,33 @@ private static void doAssertResultSetData(ResultSet expected, ResultSet actual, } for (int column = 1; column <= columns; column++) { - Object expectedObject = expected.getObject(column); - Object actualObject = actual.getObject(column); - int type = metaData.getColumnType(column); + Class expectedColumnClass = null; + try { + String columnClassName = metaData.getColumnClassName(column); + + // fix for CSV which returns the shortName not fully-qualified name + if (!columnClassName.contains(".")) { + switch (columnClassName) { + case "Timestamp": + columnClassName = "java.sql.Timestamp"; + break; + case "Int": + columnClassName = "java.lang.Integer"; + break; + default: + columnClassName = "java.lang." + columnClassName; + break; + } + } + + expectedColumnClass = Class.forName(columnClassName); + } catch (ClassNotFoundException cnfe) { + throw new SQLException(cnfe); + } + + Object expectedObject = expected.getObject(column); + Object actualObject = lenient ? actual.getObject(column, expectedColumnClass) : actual.getObject(column); String msg = format(Locale.ROOT, "Different result for column [" + metaData.getColumnName(column) + "], " + "entry [" + (count + 1) + "]"); @@ -161,4 +214,20 @@ else if (type == Types.DOUBLE) { } } + /** + * Returns the value of the given type either in a lenient fashion (widened) or strict. 
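+     * When lenient, all integer types (TINYINT, SMALLINT, INTEGER, BIGINT) are widened to
+     * BIGINT and all floating-point types (FLOAT, REAL, DOUBLE) to REAL.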
+ */ + private static int typeOf(int columnType, boolean lenient) { + if (lenient) { + // integer upcast to long + if (columnType == TINYINT || columnType == SMALLINT || columnType == INTEGER || columnType == BIGINT) { + return BIGINT; + } + if (columnType == FLOAT || columnType == REAL || columnType == DOUBLE) { + return REAL; + } + } + + return columnType; + } } \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java index 5062525f2b31e..2bb4697749a3a 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcTestUtils.java @@ -6,10 +6,16 @@ package org.elasticsearch.xpack.qa.sql.jdbc; import org.apache.logging.log4j.Logger; +import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import java.sql.JDBCType; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.List; public abstract class JdbcTestUtils { @@ -96,4 +102,36 @@ private static StringBuilder trimOrPad(StringBuilder buffer) { } return buffer; } + + public static void logLikeCLI(ResultSet rs, Logger logger) throws SQLException { + ResultSetMetaData metaData = rs.getMetaData(); + int columns = metaData.getColumnCount(); + + List cols = new ArrayList<>(columns); + + for (int i = 1; i <= columns; i++) { + cols.add(new ColumnInfo(metaData.getTableName(i), metaData.getColumnName(i), metaData.getColumnTypeName(i), + JDBCType.valueOf(metaData.getColumnType(i)), metaData.getColumnDisplaySize(i))); + } + + + List> data = new ArrayList<>(); + + while (rs.next()) { + List entry = new ArrayList<>(columns); + for (int i = 1; i <= columns; i++) { + Object value = rs.getObject(i); + // timestamp to string is similar but not ISO8601 - fix it + if (value instanceof Timestamp) { + Timestamp ts = (Timestamp) value; + value = ts.toInstant().toString(); + } + entry.add(value); + } + data.add(entry); + } + + CliFormatter formatter = new CliFormatter(cols, data); + logger.info("\n" + formatter.formatWithHeader(cols, data)); + } } \ No newline at end of file diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java index d8ba1ade959ae..9ece8d7d1d33c 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java @@ -8,8 +8,10 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.junit.AfterClass; import org.junit.Before; @@ -28,6 +30,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Properties; /** * Tests that compare the Elasticsearch JDBC client to some other JDBC client @@ -50,11 +53,19 @@ public SpecBaseIntegrationTestCase(String fileName, String groupName, String tes @Before public void 
setupTestDataIfNeeded() throws Exception { - if (client().performRequest(new Request("HEAD", "/test_emp")).getStatusLine().getStatusCode() == 404) { - DataLoader.loadDatasetIntoEs(client()); + if (client().performRequest(new Request("HEAD", "/" + indexName())).getStatusLine().getStatusCode() == 404) { + loadDataset(client()); } } + protected String indexName() { + return "test_emp"; + } + + protected void loadDataset(RestClient client) throws Exception { + DataLoader.loadEmpDatasetIntoEs(client); + } + @Override protected boolean preserveIndicesUponCompletion() { return true; @@ -95,6 +106,14 @@ protected ResultSet executeJdbcQuery(Connection con, String query) throws SQLExc return statement.executeQuery(query); } + // TODO: use UTC for now until deciding on a strategy for handling date extraction + @Override + protected Properties connectionProperties() { + Properties connectionProperties = new Properties(); + connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); + return connectionProperties; + } + protected boolean logEsResultSet() { return false; } diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java index f1bcef6f750fc..3b5cae742d34b 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SqlSpecTestCase.java @@ -7,14 +7,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.junit.ClassRule; import java.sql.Connection; import java.sql.ResultSet; import java.util.ArrayList; import java.util.List; -import java.util.Properties; /** * Tests comparing sql queries executed against our jdbc client @@ -67,12 +65,4 @@ protected final void doTest() throws Throwable { assertResults(expected, elasticResults); } } - - // TODO: use UTC for now until deciding on a strategy for handling date extraction - @Override - protected Properties connectionProperties() { - Properties connectionProperties = new Properties(); - connectionProperties.setProperty(JdbcConfiguration.TIME_ZONE, "UTC"); - return connectionProperties; - } } diff --git a/x-pack/qa/sql/src/main/resources/docs.csv-spec b/x-pack/qa/sql/src/main/resources/docs.csv-spec new file mode 100644 index 0000000000000..8bf74bc4a2f89 --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/docs.csv-spec @@ -0,0 +1,639 @@ +// +// CSV spec used by the docs +// + +/////////////////////////////// +// +// Describe table +// +/////////////////////////////// + +describeTable +// tag::describeTable +DESCRIBE emp; + + column | type +--------------------+--------------- +birth_date |TIMESTAMP +dep |STRUCT +dep.dep_id |VARCHAR +dep.dep_name |VARCHAR +dep.dep_name.keyword|VARCHAR +dep.from_date |TIMESTAMP +dep.to_date |TIMESTAMP +emp_no |INTEGER +first_name |VARCHAR +first_name.keyword |VARCHAR +gender |VARCHAR +hire_date |TIMESTAMP +languages |TINYINT +last_name |VARCHAR +last_name.keyword |VARCHAR +salary |INTEGER + +// end::describeTable +; + +//describeTableAlias +// tag::describeTableAlias +//DESCRIBE employee; + +// column | type +//---------------+--------------- + +// end::describeTableAlias +//; + +// +// Show columns +// +showColumns +// tag::showColumns +SHOW COLUMNS IN emp; + + column | type +--------------------+--------------- +birth_date |TIMESTAMP +dep |STRUCT +dep.dep_id |VARCHAR +dep.dep_name 
|VARCHAR +dep.dep_name.keyword|VARCHAR +dep.from_date |TIMESTAMP +dep.to_date |TIMESTAMP +emp_no |INTEGER +first_name |VARCHAR +first_name.keyword |VARCHAR +gender |VARCHAR +hire_date |TIMESTAMP +languages |TINYINT +last_name |VARCHAR +last_name.keyword |VARCHAR +salary |INTEGER + +// end::showColumns +; + +//showColumnsInAlias +// tag::showColumnsInAlias +//SHOW COLUMNS FROM employee; + +// column | type +//---------------+--------------- + +// end::showColumnsInAlias +//; + +/////////////////////////////// +// +// Show Tables +// +/////////////////////////////// + +showTables +// tag::showTables +SHOW TABLES; + + name | type +---------------+--------------- +emp |BASE TABLE +employees |ALIAS +library |BASE TABLE + +// end::showTables +; + +showTablesLikeExact +// tag::showTablesLikeExact +SHOW TABLES LIKE 'emp'; + + name | type +---------------+--------------- +emp |BASE TABLE + +// end::showTablesLikeExact +; + +showTablesLikeWildcard +// tag::showTablesLikeWildcard +SHOW TABLES LIKE 'emp%'; + + name | type +---------------+--------------- +emp |BASE TABLE +employees |ALIAS + +// end::showTablesLikeWildcard +; + + +showTablesLikeOneChar +// tag::showTablesLikeOneChar +SHOW TABLES LIKE 'em_'; + + name | type +---------------+--------------- +emp |BASE TABLE + +// end::showTablesLikeOneChar +; + +showTablesLikeMixed +// tag::showTablesLikeMixed +SHOW TABLES LIKE '%em_'; + + name | type +---------------+--------------- +emp |BASE TABLE + +// end::showTablesLikeMixed +; + +/////////////////////////////// +// +// Show Functions +// +/////////////////////////////// + +showFunctions +// tag::showFunctions +SHOW FUNCTIONS; + + name | type +----------------+--------------- +AVG |AGGREGATE +COUNT |AGGREGATE +MAX |AGGREGATE +MIN |AGGREGATE +SUM |AGGREGATE +STDDEV_POP |AGGREGATE +VAR_POP |AGGREGATE +PERCENTILE |AGGREGATE +PERCENTILE_RANK |AGGREGATE +SUM_OF_SQUARES |AGGREGATE +SKEWNESS |AGGREGATE +KURTOSIS |AGGREGATE +DAY_OF_MONTH |SCALAR +DAY |SCALAR +DOM |SCALAR +DAY_OF_WEEK |SCALAR +DOW |SCALAR +DAY_OF_YEAR |SCALAR +DOY |SCALAR +HOUR_OF_DAY |SCALAR +HOUR |SCALAR +MINUTE_OF_DAY |SCALAR +MINUTE_OF_HOUR |SCALAR +MINUTE |SCALAR +SECOND_OF_MINUTE|SCALAR +SECOND |SCALAR +MONTH_OF_YEAR |SCALAR +MONTH |SCALAR +YEAR |SCALAR +WEEK_OF_YEAR |SCALAR +WEEK |SCALAR +ABS |SCALAR +ACOS |SCALAR +ASIN |SCALAR +ATAN |SCALAR +ATAN2 |SCALAR +CBRT |SCALAR +CEIL |SCALAR +CEILING |SCALAR +COS |SCALAR +COSH |SCALAR +COT |SCALAR +DEGREES |SCALAR +E |SCALAR +EXP |SCALAR +EXPM1 |SCALAR +FLOOR |SCALAR +LOG |SCALAR +LOG10 |SCALAR +MOD |SCALAR +PI |SCALAR +POWER |SCALAR +RADIANS |SCALAR +RANDOM |SCALAR +RAND |SCALAR +ROUND |SCALAR +SIGN |SCALAR +SIGNUM |SCALAR +SIN |SCALAR +SINH |SCALAR +SQRT |SCALAR +TAN |SCALAR +SCORE |SCORE + +// end::showFunctions +; + +showFunctionsLikeExact +// tag::showFunctionsLikeExact +SHOW FUNCTIONS LIKE 'ABS'; + + name | type +---------------+--------------- +ABS |SCALAR + +// end::showFunctionsLikeExact +; + +showFunctionsLikeWildcard +// tag::showFunctionsLikeWildcard +SHOW FUNCTIONS LIKE 'A%'; + + name | type +---------------+--------------- +AVG |AGGREGATE +ABS |SCALAR +ACOS |SCALAR +ASIN |SCALAR +ATAN |SCALAR +ATAN2 |SCALAR +// end::showFunctionsLikeWildcard +; + +showFunctionsLikeChar +// tag::showFunctionsLikeChar +SHOW FUNCTIONS LIKE 'A__'; + + name | type +---------------+--------------- +AVG |AGGREGATE +ABS |SCALAR +// end::showFunctionsLikeChar +; + +showFunctionsWithPattern +// tag::showFunctionsWithPattern +SHOW FUNCTIONS '%DAY%'; + + name | type +---------------+--------------- 
+DAY_OF_MONTH |SCALAR +DAY |SCALAR +DAY_OF_WEEK |SCALAR +DAY_OF_YEAR |SCALAR +HOUR_OF_DAY |SCALAR +MINUTE_OF_DAY |SCALAR + +// end::showFunctionsWithPattern +; + +/////////////////////////////// +// +// Select +// +/////////////////////////////// + +selectColumnAlias +// tag::selectColumnAlias +SELECT 1 + 1 AS result; + + result +--------------- +2 + +// end::selectColumnAlias +; + +selectInline +// tag::selectInline +SELECT 1 + 1; + + (1 + 1) +--------------- +2 + +// end::selectInline +; + +selectColumn +// tag::selectColumn +SELECT emp_no FROM emp LIMIT 1; + + emp_no +--------------- +10001 + +// end::selectColumn +; + +selectQualifiedColumn +// tag::selectQualifiedColumn +SELECT emp.emp_no FROM emp LIMIT 1; + + emp_no +--------------- +10001 + +// end::selectQualifiedColumn +; + + +wildcardWithOrder +// tag::wildcardWithOrder +SELECT * FROM emp LIMIT 1; + + birth_date | emp_no | first_name | gender | hire_date | languages | last_name | salary +--------------------+---------------+---------------+---------------+--------------------+---------------+---------------+--------------- +1953-09-02T00:00:00Z|10001 |Georgi |M |1986-06-26T00:00:00Z|2 |Facello |57305 + +// end::wildcardWithOrder +; + +fromTable +// tag::fromTable +SELECT * FROM emp LIMIT 1; + + birth_date | emp_no | first_name | gender | hire_date | languages | last_name | salary +--------------------+---------------+---------------+---------------+--------------------+---------------+---------------+--------------- +1953-09-02T00:00:00Z|10001 |Georgi |M |1986-06-26T00:00:00Z|2 |Facello |57305 + + +// end::fromTable +; + +fromTableQuoted +// tag::fromTableQuoted +SELECT * FROM "emp" LIMIT 1; + + birth_date | emp_no | first_name | gender | hire_date | languages | last_name | salary +--------------------+---------------+---------------+---------------+--------------------+---------------+---------------+--------------- +1953-09-02T00:00:00Z|10001 |Georgi |M |1986-06-26T00:00:00Z|2 |Facello |57305 + +// end::fromTableQuoted +; + +fromTablePatternQuoted +// tag::fromTablePatternQuoted +SELECT emp_no FROM "e*p" LIMIT 1; + + emp_no +--------------- +10001 + +// end::fromTablePatternQuoted +; + +fromTableAlias +// tag::fromTableAlias +SELECT e.emp_no FROM emp AS e LIMIT 1; + + emp_no +--------------- +10001 + +// end::fromTableAlias +; + +basicWhere +// tag::basicWhere +SELECT last_name FROM emp WHERE emp_no = 10001; + + last_name +--------------- +Facello + +// end::basicWhere +; + +/////////////////////////////// +// +// Group By +// +/////////////////////////////// + +groupByColumn +// tag::groupByColumn +SELECT gender AS g FROM emp GROUP BY gender; + + g +--------------- +F +M + +// end::groupByColumn +; + +groupByOrdinal +// tag::groupByOrdinal +SELECT gender FROM emp GROUP BY 1; + + gender +--------------- +F +M + +// end::groupByOrdinal +; + +groupByAlias +// tag::groupByAlias +SELECT gender AS g FROM emp GROUP BY g; + + g +--------------- +F +M + +// end::groupByAlias +; + +groupByExpression +// tag::groupByExpression +SELECT languages + 1 AS l FROM emp GROUP BY l; + + l +--------------- +2 +3 +4 +5 +6 + + +// end::groupByExpression +; + +groupByAndAgg +// tag::groupByAndAgg +SELECT gender AS g, COUNT(*) AS c FROM emp GROUP BY gender; + + g | c +---------------+--------------- +F |37 +M |63 + +// end::groupByAndAgg +; + +groupByAndAggExpression +// tag::groupByAndAggExpression +SELECT gender AS g, ROUND(MIN(salary) / 100) AS salary FROM emp GROUP BY gender; + + g | salary +---------------+--------------- +F |260 +M |253 + +// 
end::groupByAndAggExpression +; + +groupByAndMultipleAggs +// tag::groupByAndMultipleAggs +SELECT gender AS g, KURTOSIS(salary) AS k, SKEWNESS(salary) AS s FROM emp GROUP BY gender; + + g | k | s +---------------+------------------+------------------- +F |1.8427808415250482|0.04517149340491813 +M |2.259327644285826 |0.40268950715550333 + +// end::groupByAndMultipleAggs +; + +groupByImplicitCount +// tag::groupByImplicitCount +SELECT COUNT(*) AS count FROM emp; + + count +--------------- +100 + +// end::groupByImplicitCount +; + +/////////////////////////////// +// +// Having +// +/////////////////////////////// + +groupByHaving +// tag::groupByHaving +SELECT languages AS l, COUNT(*) AS c FROM emp GROUP BY l HAVING c BETWEEN 15 AND 20; + + l | c +---------------+--------------- +1 |16 +2 |20 +4 |18 + +// end::groupByHaving +; + +groupByHavingMultiple +// tag::groupByHavingMultiple +SELECT MIN(salary) AS min, MAX(salary) AS max, MAX(salary) - MIN(salary) AS diff FROM emp GROUP BY languages HAVING diff - max % min > 0 AND AVG(salary) > 30000; + + min | max | diff +---------------+---------------+--------------- +25976 |73717 |47741 +29175 |73578 |44403 +26436 |74999 |48563 +27215 |74572 |47357 +25324 |73851 |48527 + +// end::groupByHavingMultiple +; + +groupByImplicitMultipleAggs +// tag::groupByImplicitMultipleAggs +SELECT MIN(salary) AS min, MAX(salary) AS max, AVG(salary) AS avg, COUNT(*) AS count FROM emp; + + min | max | avg | count +---------------+---------------+---------------+--------------- +25324 |74999 |48248 |100 + +// end::groupByImplicitMultipleAggs +; + +groupByHavingImplicitMatch +// tag::groupByHavingImplicitMatch +SELECT MIN(salary) AS min, MAX(salary) AS max FROM emp HAVING min > 25000; + + min | max +---------------+--------------- +25324 |74999 + +// end::groupByHavingImplicitMatch +; + +//groupByHavingImplicitNoMatch +// tag::groupByHavingImplicitNoMatch +//SELECT MIN(salary) AS min, MAX(salary) AS max FROM emp HAVING max > 75000; + +// min | max +//---------------+--------------- + +// end::groupByHavingImplicitNoMatch +//; + +/////////////////////////////// +// +// Order by +// +/////////////////////////////// + +orderByBasic +// tag::orderByBasic +SELECT * FROM library ORDER BY page_count DESC LIMIT 5; + + author | name | page_count | release_date +-----------------+--------------------+---------------+-------------------- +Peter F. Hamilton|Pandora's Star |768 |2004-03-02T00:00:00Z +Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00Z +Frank Herbert |Dune |604 |1965-06-01T00:00:00Z +Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00Z +James S.A. 
Corey |Leviathan Wakes |561 |2011-06-02T00:00:00Z + + + +// end::orderByBasic +; + +orderByScore +// tag::orderByScore +SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC; + + SCORE() | author | name | page_count | release_date +---------------+---------------+-------------------+---------------+-------------------- +2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00Z +1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00Z +1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00Z +1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00Z + +// end::orderByScore +; + +orderByScoreWithMatch +// tag::orderByScoreWithMatch +SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DESC; + + SCORE() | author | name | page_count | release_date +---------------+---------------+-------------------+---------------+-------------------- +2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00Z +1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00Z +1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00Z +1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00Z + +// end::orderByScoreWithMatch +; + + +/////////////////////////////// +// +// Limit +// +/////////////////////////////// + +limitBasic +// tag::limitBasic +SELECT first_name, last_name, emp_no FROM emp LIMIT 1; + + first_name | last_name | emp_no +---------------+---------------+--------------- +Georgi |Facello |10001 + +// end::limitBasic +; diff --git a/x-pack/qa/sql/src/main/resources/library.csv b/x-pack/qa/sql/src/main/resources/library.csv new file mode 100644 index 0000000000000..a93be21abe63e --- /dev/null +++ b/x-pack/qa/sql/src/main/resources/library.csv @@ -0,0 +1,25 @@ +name,author,release_date,page_count +Leviathan Wakes,James S.A. Corey,2011-06-02T00:00:00Z,561 +Hyperion,Dan Simmons,1989-05-26T00:00:00Z,482 +Dune,Frank Herbert,1965-06-01T00:00:00Z,604 +Dune Messiah,Frank Herbert,1969-10-15T00:00:00Z,331 +Children of Dune,Frank Herbert,1976-04-21T00:00:00Z,408 +God Emperor of Dune,Frank Herbert,1981-05-28T00:00:00Z,454 +Consider Phlebas,Iain M. Banks,1987-04-23T00:00:00Z,471 +Pandora's Star,Peter F. Hamilton,2004-03-02T00:00:00Z,768 +Revelation Space,Alastair Reynolds,2000-03-15T00:00:00Z,585 +A Fire Upon the Deep,Vernor Vinge,1992-06-01T00:00:00Z,613 +Ender's Game,Orson Scott Card,1985-06-01T00:00:00Z,324 +1984,George Orwell,1985-06-01T00:00:00Z,328 +Fahrenheit 451,Ray Bradbury,1953-10-15T00:00:00Z,227 +Brave New World,Aldous Huxley,1932-06-01T00:00:00Z,268 +Foundation,Isaac Asimov,1951-06-01T00:00:00Z,224 +The Giver,Lois Lowry,1993-04-26T00:00:00Z,208 +Slaughterhouse-Five,Kurt Vonnegut,1969-06-01T00:00:00Z,275 +The Hitchhiker's Guide to the Galaxy,Douglas Adams,1979-10-12T00:00:00Z,180 +Snow Crash,Neal Stephenson,1992-06-01T00:00:00Z,470 +Neuromancer,William Gibson,1984-07-01T00:00:00Z,271 +The Handmaid's Tale,Margaret Atwood,1985-06-01T00:00:00Z,311 +Starship Troopers,Robert A. Heinlein,1959-12-01T00:00:00Z,335 +The Left Hand of Darkness,Ursula K. Le Guin,1969-06-01T00:00:00Z,304 +The Moon is a Harsh Mistress,Robert A. 
Heinlein,1966-04-01T00:00:00Z,288 diff --git a/x-pack/qa/sql/src/main/resources/select.sql-spec b/x-pack/qa/sql/src/main/resources/select.sql-spec index 76562a07c86f7..ce57606e35b0c 100644 --- a/x-pack/qa/sql/src/main/resources/select.sql-spec +++ b/x-pack/qa/sql/src/main/resources/select.sql-spec @@ -3,9 +3,7 @@ // wildcardWithOrder -// tag::wildcardWithOrder SELECT * FROM test_emp ORDER BY emp_no; -// end::wildcardWithOrder column SELECT last_name FROM "test_emp" ORDER BY emp_no; columnWithAlias From 093ea037b4cb6a747583a35e53b4df740fd4a36e Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Tue, 3 Jul 2018 17:19:48 +0300 Subject: [PATCH 20/36] [DOCS] Typos --- docs/reference/sql/language/syntax/select.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/sql/language/syntax/select.asciidoc b/docs/reference/sql/language/syntax/select.asciidoc index ad3b564bb00d0..b58173097b0ae 100644 --- a/docs/reference/sql/language/syntax/select.asciidoc +++ b/docs/reference/sql/language/syntax/select.asciidoc @@ -177,7 +177,7 @@ And grouping by column expression (typically used along-side an alias): include-tagged::{sql-specs}/docs.csv-spec[groupByExpression] ---- -When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be either aggregate functions or expressions used for grouping or derivates of (otherwise there would be more than one possible value to return for each ungrouped column). +When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be either aggregate functions or expressions used for grouping or derivatives of (otherwise there would be more than one possible value to return for each ungrouped column). To wit: @@ -331,8 +331,8 @@ include-tagged::{sql-specs}/docs.csv-spec[orderByScoreWithMatch] ---- NOTE: -Trying to return `score` from a non full-text queries will return the same value for all results, as -all are equilley relevant. +Trying to return `score` from a non full-text query will return the same value for all results, as +all are equally relevant. 
[[sql-syntax-limit]] [float] From 1d114071dabc8b5208c797a9bbc3b990680577b7 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Tue, 3 Jul 2018 15:57:02 +0100 Subject: [PATCH 21/36] Add analyze API to high-level rest client (#31577) --- .../elasticsearch/client/IndicesClient.java | 30 +++++ .../client/RequestConverters.java | 13 ++ .../elasticsearch/client/IndicesClientIT.java | 18 +++ .../client/RequestConvertersTests.java | 17 +++ .../IndicesClientDocumentationIT.java | 126 ++++++++++++++++++ .../high-level/indices/analyze.asciidoc | 119 +++++++++++++++++ .../high-level/supported-apis.asciidoc | 1 + .../admin/indices/analyze/AnalyzeRequest.java | 48 ++++++- .../indices/analyze/AnalyzeResponse.java | 116 +++++++++++++++- .../analyze/DetailAnalyzeResponse.java | 125 ++++++++++++++++- .../indices/analyze/AnalyzeResponseTests.java | 115 ++++++++++++++++ 11 files changed, 720 insertions(+), 8 deletions(-) create mode 100644 docs/java-rest/high-level/indices/analyze.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 28a9cc2036673..641480535c5ad 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -752,4 +754,32 @@ public void getTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, RequestConverters::getTemplates, options, GetIndexTemplatesResponse::fromXContent, listener, emptySet()); } + + /** + * Calls the analyze API + * + * See Analyze API on elastic.co + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public AnalyzeResponse analyze(AnalyzeRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::analyze, options, + AnalyzeResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously calls the analyze API + * + * See Analyze API on elastic.co + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void analyzeAsync(AnalyzeRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::analyze, options, + AnalyzeResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index ee4b56e8a3b85..26f0b5c647404 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -45,6 +45,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -1019,6 +1020,18 @@ static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) t return request; } + static Request analyze(AnalyzeRequest request) throws IOException { + EndpointBuilder builder = new EndpointBuilder(); + String index = request.index(); + if (index != null) { + builder.addPathPart(index); + } + builder.addPathPartAsIs("_analyze"); + Request req = new Request(HttpGet.METHOD_NAME, builder.build()); + req.setEntity(createEntity(request, REQUEST_BODY_CONTENT_TYPE)); + return req; + } + static Request getScript(GetStoredScriptRequest getStoredScriptRequest) { String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id()).build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 5f8e6b5d36526..405653a3841eb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -29,6 +29,8 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -1278,4 +1280,20 @@ public void testGetIndexTemplate() throws Exception { new GetIndexTemplatesRequest().names("the-template-*"), client.indices()::getTemplate, client.indices()::getTemplateAsync)); assertThat(notFound.status(), equalTo(RestStatus.NOT_FOUND)); } + + public void testAnalyze() throws Exception { + + RestHighLevelClient client 
= highLevelClient(); + + AnalyzeRequest noindexRequest = new AnalyzeRequest().text("One two three").analyzer("english"); + AnalyzeResponse noindexResponse = execute(noindexRequest, client.indices()::analyze, client.indices()::analyzeAsync); + + assertThat(noindexResponse.getTokens(), hasSize(3)); + + AnalyzeRequest detailsRequest = new AnalyzeRequest().text("One two three").analyzer("english").explain(true); + AnalyzeResponse detailsResponse = execute(detailsRequest, client.indices()::analyze, client.indices()::analyzeAsync); + + assertNotNull(detailsResponse.detail()); + + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index ce72ecc8a59b7..aa3788af7494b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -2239,6 +2240,22 @@ public void testGetTemplateRequest() throws Exception { assertThat(request.getEntity(), nullValue()); } + public void testAnalyzeRequest() throws Exception { + AnalyzeRequest indexAnalyzeRequest = new AnalyzeRequest() + .text("Here is some text") + .index("test_index") + .analyzer("test_analyzer"); + + Request request = RequestConverters.analyze(indexAnalyzeRequest); + assertThat(request.getEndpoint(), equalTo("/test_index/_analyze")); + assertToXContentBody(indexAnalyzeRequest, request.getEntity()); + + AnalyzeRequest analyzeRequest = new AnalyzeRequest() + .text("more text") + .analyzer("test_analyzer"); + assertThat(RequestConverters.analyze(analyzeRequest).getEndpoint(), equalTo("/_analyze")); + } + public void testGetScriptRequest() { GetStoredScriptRequest getStoredScriptRequest = new GetStoredScriptRequest("x-script"); Map expectedParams = new HashMap<>(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 95fa7f7185b5b..964757db372ae 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -27,6 +27,9 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; +import org.elasticsearch.action.admin.indices.analyze.DetailAnalyzeResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import 
org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -2317,4 +2320,127 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testAnalyze() throws IOException, InterruptedException { + + RestHighLevelClient client = highLevelClient(); + + { + // tag::analyze-builtin-request + AnalyzeRequest request = new AnalyzeRequest(); + request.text("Some text to analyze", "Some more text to analyze"); // <1> + request.analyzer("english"); // <2> + // end::analyze-builtin-request + } + + { + // tag::analyze-custom-request + AnalyzeRequest request = new AnalyzeRequest(); + request.text("Some text to analyze"); + request.addCharFilter("html_strip"); // <1> + request.tokenizer("standard"); // <2> + request.addTokenFilter("lowercase"); // <3> + + Map stopFilter = new HashMap<>(); + stopFilter.put("type", "stop"); + stopFilter.put("stopwords", new String[]{ "to" }); // <4> + request.addTokenFilter(stopFilter); // <5> + // end::analyze-custom-request + } + + { + // tag::analyze-custom-normalizer-request + AnalyzeRequest request = new AnalyzeRequest(); + request.text("BaR"); + request.addTokenFilter("lowercase"); + // end::analyze-custom-normalizer-request + + // tag::analyze-request-explain + request.explain(true); // <1> + request.attributes("keyword", "type"); // <2> + // end::analyze-request-explain + + // tag::analyze-request-sync + AnalyzeResponse response = client.indices().analyze(request, RequestOptions.DEFAULT); + // end::analyze-request-sync + + // tag::analyze-response-tokens + List tokens = response.getTokens(); // <1> + // end::analyze-response-tokens + // tag::analyze-response-detail + DetailAnalyzeResponse detail = response.detail(); // <1> + // end::analyze-response-detail + + assertNull(tokens); + assertNotNull(detail.tokenizer()); + } + + CreateIndexRequest req = new CreateIndexRequest("my_index"); + CreateIndexResponse resp = client.indices().create(req, RequestOptions.DEFAULT); + assertTrue(resp.isAcknowledged()); + + PutMappingRequest pmReq = new PutMappingRequest() + .indices("my_index") + .type("_doc") + .source("my_field", "type=text,analyzer=english"); + PutMappingResponse pmResp = client.indices().putMapping(pmReq, RequestOptions.DEFAULT); + assertTrue(pmResp.isAcknowledged()); + + { + // tag::analyze-index-request + AnalyzeRequest request = new AnalyzeRequest(); + request.index("my_index"); // <1> + request.analyzer("my_analyzer"); // <2> + request.text("some text to analyze"); + // end::analyze-index-request + + // tag::analyze-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(AnalyzeResponse analyzeTokens) { + + } + + @Override + public void onFailure(Exception e) { + + } + }; + // end::analyze-execute-listener + + // use a built-in analyzer in the test + request = new AnalyzeRequest(); + request.index("my_index"); + request.field("my_field"); + request.text("some text to analyze"); + // Use a blocking listener in the test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::analyze-request-async + client.indices().analyzeAsync(request, RequestOptions.DEFAULT, listener); + // end::analyze-request-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + { + // tag::analyze-index-normalizer-request + AnalyzeRequest request = new AnalyzeRequest(); + request.index("my_index"); // <1> + 
request.normalizer("my_normalizer"); // <2> + request.text("some text to analyze"); + // end::analyze-index-normalizer-request + } + + { + // tag::analyze-field-request + AnalyzeRequest request = new AnalyzeRequest(); + request.index("my_index"); + request.field("my_field"); + request.text("some text to analyze"); + // end::analyze-field-request + } + + } } diff --git a/docs/java-rest/high-level/indices/analyze.asciidoc b/docs/java-rest/high-level/indices/analyze.asciidoc new file mode 100644 index 0000000000000..4bffe2f020382 --- /dev/null +++ b/docs/java-rest/high-level/indices/analyze.asciidoc @@ -0,0 +1,119 @@ +[[java-rest-high-analyze]] +=== Analyze API + +[[java-rest-high-analyze-request]] +==== Analyze Request + +An `AnalyzeRequest` contains the text to analyze, and one of several options to +specify how the analysis should be performed. + +The simplest version uses a built-in analyzer: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-builtin-request] +--------------------------------------------------- +<1> The text to include. Multiple strings are treated as a multi-valued field +<2> A built-in analyzer + +You can configure a custom analyzer: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-custom-request] +--------------------------------------------------- +<1> Configure char filters +<2> Configure the tokenizer +<3> Add a built-in tokenfilter +<4> Configuration for a custom tokenfilter +<5> Add the custom tokenfilter + +You can also build a custom normalizer, by including only charfilters and +tokenfilters: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-custom-normalizer-request] +--------------------------------------------------- + +You can analyze text using an analyzer defined in an existing index: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-index-request] +--------------------------------------------------- +<1> The index containing the mappings +<2> The analyzer defined on this index to use + +Or you can use a normalizer: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-index-normalizer-request] +--------------------------------------------------- +<1> The index containing the mappings +<2> The normalizer defined on this index to use + +You can analyze text using the mappings for a particular field in an index: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-field-request] +--------------------------------------------------- + +==== Optional arguemnts +The following arguments can also optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-request-explain] +--------------------------------------------------- +<1> Setting `explain` to true will add further details 
to the response +<2> Setting `attributes` allows you to return only token attributes that you are +interested in + +[[java-rest-high-analyze-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-request-sync] +--------------------------------------------------- + +[[java-rest-high-analyze-async]] +==== Asynchronous Execution + +The asynchronous execution of an analyze request requires both the `AnalyzeRequest` +instance and an `ActionListener` instance to be passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-request-async] +--------------------------------------------------- + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method if the +execution successfully completed or using the `onFailure` method if it failed. + +A typical listener for `AnalyzeResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-execute-listener] +--------------------------------------------------- + +[[java-rest-high-analyze-response]] +==== Analyze Response + +The returned `AnalyzeResponse` allows you to retrieve details of the analysis as +follows: +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-response-tokens] +--------------------------------------------------- +<1> `AnalyzeToken` holds information about the individual tokens produced by analysis + +If `explain` was set to `true`, then information is instead returned from the `detail()` +method: + +["source","java",subs="attributes,callouts,macros"] +--------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[analyze-response-detail] +--------------------------------------------------- +<1> `DetailAnalyzeResponse` holds more detailed information about tokens produced by +the various substeps in the analysis chain. 
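+
+For a concrete picture, here is a minimal sketch (not one of the tested snippets
+above; it assumes the built-in `english` analyzer and a `client` set up as in the
+rest of this chapter) of iterating over the returned tokens:
+
+["source","java"]
+---------------------------------------------------
+AnalyzeRequest request = new AnalyzeRequest();
+request.text("Some text to analyze");
+request.analyzer("english");
+AnalyzeResponse response = client.indices().analyze(request, RequestOptions.DEFAULT);
+for (AnalyzeResponse.AnalyzeToken token : response.getTokens()) {
+    // each token exposes its term, offsets, position and type
+    System.out.println(token.getTerm() + " " + token.getStartOffset() + "-" + token.getEndOffset());
+}
+---------------------------------------------------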
\ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 93513a042adec..0faf73e59074a 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -88,6 +88,7 @@ Alias Management:: * <> * <> +include::indices/analyze.asciidoc[] include::indices/create_index.asciidoc[] include::indices/delete_index.asciidoc[] include::indices/indices_exists.asciidoc[] diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java index d9c018848d7e8..09686025e9da9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java @@ -26,6 +26,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -42,7 +44,7 @@ * A request to analyze a text associated with a specific index. Allow to provide * the actual analyzer name to perform the analysis with. */ -public class AnalyzeRequest extends SingleShardRequest { +public class AnalyzeRequest extends SingleShardRequest implements ToXContentObject { private String[] text; @@ -62,7 +64,7 @@ public class AnalyzeRequest extends SingleShardRequest { private String normalizer; - public static class NameOrDefinition implements Writeable { + public static class NameOrDefinition implements Writeable, ToXContentFragment { // exactly one of these two members is not null public final String name; public final Settings definition; @@ -102,6 +104,15 @@ public void writeTo(StreamOutput out) throws IOException { Settings.writeSettingsToStream(definition, out); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (definition == null) { + return builder.value(name); + } + return definition.toXContent(builder, params); + } + } public AnalyzeRequest() { @@ -171,6 +182,7 @@ public AnalyzeRequest addCharFilter(String charFilter) { this.charFilters.add(new NameOrDefinition(charFilter)); return this; } + public List charFilters() { return this.charFilters; } @@ -260,4 +272,36 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(normalizer); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("text", text); + if (Strings.isNullOrEmpty(analyzer) == false) { + builder.field("analyzer", analyzer); + } + if (tokenizer != null) { + tokenizer.toXContent(builder, params); + } + if (tokenFilters.size() > 0) { + builder.field("filter", tokenFilters); + } + if (charFilters.size() > 0) { + builder.field("char_filter", charFilters); + } + if (Strings.isNullOrEmpty(field) == false) { + builder.field("field", field); + } + if (explain) { + builder.field("explain", true); + } + if (attributes.length > 0) { + builder.field("attributes", attributes); + } + if (Strings.isNullOrEmpty(normalizer) == false) { + 
builder.field("normalizer", normalizer); + } + return builder.endObject(); + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index 1e54def2385f8..d45ab2682a5ec 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -20,17 +20,27 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; public class AnalyzeResponse extends ActionResponse implements Iterable, ToXContentObject { @@ -46,6 +56,25 @@ public static class AnalyzeToken implements Streamable, ToXContentObject { AnalyzeToken() { } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnalyzeToken that = (AnalyzeToken) o; + return startOffset == that.startOffset && + endOffset == that.endOffset && + position == that.position && + positionLength == that.positionLength && + Objects.equals(term, that.term) && + Objects.equals(attributes, that.attributes) && + Objects.equals(type, that.type); + } + + @Override + public int hashCode() { + return Objects.hash(term, startOffset, endOffset, position, positionLength, attributes, type); + } + public AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength, String type, Map attributes) { this.term = term; @@ -97,7 +126,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.POSITION_LENGTH, positionLength); } if (attributes != null && !attributes.isEmpty()) { - for (Map.Entry entity : attributes.entrySet()) { + Map sortedAttributes = new TreeMap<>(attributes); + for (Map.Entry entity : sortedAttributes.entrySet()) { builder.field(entity.getKey(), entity.getValue()); } } @@ -111,6 +141,50 @@ public static AnalyzeToken readAnalyzeToken(StreamInput in) throws IOException { return analyzeToken; } + public static AnalyzeToken fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + String field = null; + String term = ""; + int position = -1; + int startOffset = -1; + int endOffset = -1; + int positionLength = 1; + String type = ""; + Map attributes = new HashMap<>(); + for (XContentParser.Token t = parser.nextToken(); t != XContentParser.Token.END_OBJECT; t = parser.nextToken()) { + if (t == XContentParser.Token.FIELD_NAME) { + 
field = parser.currentName(); + continue; + } + if (Fields.TOKEN.equals(field)) { + term = parser.text(); + } else if (Fields.POSITION.equals(field)) { + position = parser.intValue(); + } else if (Fields.START_OFFSET.equals(field)) { + startOffset = parser.intValue(); + } else if (Fields.END_OFFSET.equals(field)) { + endOffset = parser.intValue(); + } else if (Fields.POSITION_LENGTH.equals(field)) { + positionLength = parser.intValue(); + } else if (Fields.TYPE.equals(field)) { + type = parser.text(); + } else { + if (t == XContentParser.Token.VALUE_STRING) { + attributes.put(field, parser.text()); + } else if (t == XContentParser.Token.VALUE_NUMBER) { + attributes.put(field, parser.numberValue()); + } else if (t == XContentParser.Token.VALUE_BOOLEAN) { + attributes.put(field, parser.booleanValue()); + } else if (t == XContentParser.Token.START_OBJECT) { + attributes.put(field, parser.map()); + } else if (t == XContentParser.Token.START_ARRAY) { + attributes.put(field, parser.list()); + } + } + } + return new AnalyzeToken(term, position, startOffset, endOffset, positionLength, type, attributes); + } + @Override public void readFrom(StreamInput in) throws IOException { term = in.readString(); @@ -125,8 +199,11 @@ public void readFrom(StreamInput in) throws IOException { positionLength = 1; } } + else { + positionLength = 1; + } type = in.readOptionalString(); - attributes = (Map) in.readGenericValue(); + attributes = in.readMap(); } @Override @@ -139,7 +216,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(positionLength > 1 ? positionLength : null); } out.writeOptionalString(type); - out.writeGenericValue(attributes); + out.writeMapWithConsistentOrder(attributes); } } @@ -188,6 +265,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("analyze_response", + true, args -> new AnalyzeResponse((List) args[0], (DetailAnalyzeResponse) args[1])); + static { + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> AnalyzeToken.fromXContent(p), new ParseField(Fields.TOKENS)); + PARSER.declareObject(optionalConstructorArg(), DetailAnalyzeResponse.PARSER, new ParseField(Fields.DETAIL)); + } + + public static AnalyzeResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -196,6 +284,9 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { tokens.add(AnalyzeToken.readAnalyzeToken(in)); } + if (tokens.size() == 0) { + tokens = null; + } detail = in.readOptionalStreamable(DetailAnalyzeResponse::new); } @@ -213,6 +304,25 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalStreamable(detail); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnalyzeResponse that = (AnalyzeResponse) o; + return Objects.equals(detail, that.detail) && + Objects.equals(tokens, that.tokens); + } + + @Override + public int hashCode() { + return Objects.hash(detail, tokens); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + static final class Fields { static final String TOKENS = "tokens"; static final String TOKEN = "token"; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java index c080a01a98168..1e0c4ed525ef1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java @@ -20,20 +20,27 @@ package org.elasticsearch.action.admin.indices.analyze; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.lang.reflect.Array; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; -public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; - DetailAnalyzeResponse() { - } +public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { private boolean customAnalyzer = false; private AnalyzeTokenList analyzer; @@ -41,6 +48,9 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment { private AnalyzeTokenList tokenizer; private AnalyzeTokenList[] tokenfilters; + DetailAnalyzeResponse() { + } + public DetailAnalyzeResponse(AnalyzeTokenList analyzer) { this(false, analyzer, null, null, null); } @@ -66,6 +76,7 @@ public AnalyzeTokenList analyzer() { } public DetailAnalyzeResponse analyzer(AnalyzeTokenList analyzer) { + this.customAnalyzer = false; this.analyzer = analyzer; return this; } @@ -75,6 +86,7 @@ public CharFilteredText[] charfilters() { } public DetailAnalyzeResponse charfilters(CharFilteredText[] charfilters) { + this.customAnalyzer = true; this.charfilters = charfilters; return this; } @@ -84,6 +96,7 @@ public AnalyzeTokenList tokenizer() { } public DetailAnalyzeResponse tokenizer(AnalyzeTokenList tokenizer) { + this.customAnalyzer = true; this.tokenizer = tokenizer; return this; } @@ -93,10 +106,31 @@ public AnalyzeTokenList[] tokenfilters() { } public DetailAnalyzeResponse tokenfilters(AnalyzeTokenList[] tokenfilters) { + this.customAnalyzer = true; this.tokenfilters = tokenfilters; return this; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DetailAnalyzeResponse that = (DetailAnalyzeResponse) o; + return customAnalyzer == that.customAnalyzer && + Objects.equals(analyzer, that.analyzer) && + Arrays.equals(charfilters, that.charfilters) && + Objects.equals(tokenizer, that.tokenizer) && + Arrays.equals(tokenfilters, that.tokenfilters); + } + + @Override + public int hashCode() { + int result = Objects.hash(customAnalyzer, analyzer, tokenizer); + result = 31 * result + Arrays.hashCode(charfilters); + result = 31 * result + Arrays.hashCode(tokenfilters); + return result; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, 
Params params) throws IOException { builder.field(Fields.CUSTOM_ANALYZER, customAnalyzer); @@ -131,6 +165,32 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @SuppressWarnings("unchecked") + private static T[] fromList(Class clazz, List list) { + if (list == null) { + return null; + } + return list.toArray((T[])Array.newInstance(clazz, 0)); + } + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("detail", + true, args -> new DetailAnalyzeResponse((boolean) args[0], (AnalyzeTokenList) args[1], + fromList(CharFilteredText.class, (List)args[2]), + (AnalyzeTokenList) args[3], + fromList(AnalyzeTokenList.class, (List)args[4]))); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField(Fields.CUSTOM_ANALYZER)); + PARSER.declareObject(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField(Fields.ANALYZER)); + PARSER.declareObjectArray(optionalConstructorArg(), CharFilteredText.PARSER, new ParseField(Fields.CHARFILTERS)); + PARSER.declareObject(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField(Fields.TOKENIZER)); + PARSER.declareObjectArray(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField(Fields.TOKENFILTERS)); + } + + public static DetailAnalyzeResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + static final class Fields { static final String NAME = "name"; static final String FILTERED_TEXT = "filtered_text"; @@ -195,6 +255,22 @@ public static class AnalyzeTokenList implements Streamable, ToXContentObject { private String name; private AnalyzeResponse.AnalyzeToken[] tokens; + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnalyzeTokenList that = (AnalyzeTokenList) o; + return Objects.equals(name, that.name) && + Arrays.equals(tokens, that.tokens); + } + + @Override + public int hashCode() { + int result = Objects.hash(name); + result = 31 * result + Arrays.hashCode(tokens); + return result; + } + AnalyzeTokenList() { } @@ -235,6 +311,20 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("token_list", + true, args -> new AnalyzeTokenList((String) args[0], + fromList(AnalyzeResponse.AnalyzeToken.class, (List)args[1]))); + + static { + PARSER.declareString(constructorArg(), new ParseField(Fields.NAME)); + PARSER.declareObjectArray(constructorArg(), (p, c) -> AnalyzeResponse.AnalyzeToken.fromXContent(p), + new ParseField(AnalyzeResponse.Fields.TOKENS)); + } + + public static AnalyzeTokenList fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + @Override public void readFrom(StreamInput in) throws IOException { name = in.readString(); @@ -264,6 +354,7 @@ public void writeTo(StreamOutput out) throws IOException { public static class CharFilteredText implements Streamable, ToXContentObject { private String name; private String[] texts; + CharFilteredText() { } @@ -293,6 +384,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("char_filtered_text", + true, args -> new CharFilteredText((String) args[0], ((List) args[1]).toArray(new String[0]))); + + static { + PARSER.declareString(constructorArg(), 
new ParseField(Fields.NAME)); + PARSER.declareStringArray(constructorArg(), new ParseField(Fields.FILTERED_TEXT)); + } + + public static CharFilteredText fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + public static CharFilteredText readCharFilteredText(StreamInput in) throws IOException { CharFilteredText text = new CharFilteredText(); text.readFrom(in); @@ -310,5 +413,21 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeStringArray(texts); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CharFilteredText that = (CharFilteredText) o; + return Objects.equals(name, that.name) && + Arrays.equals(texts, that.texts); + } + + @Override + public int hashCode() { + int result = Objects.hash(name); + result = 31 * result + Arrays.hashCode(texts); + return result; + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java new file mode 100644 index 0000000000000..404db74a46e12 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.analyze; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Predicate; + +public class AnalyzeResponseTests extends AbstractStreamableXContentTestCase<AnalyzeResponse> { + + @Override + protected Predicate<String> getRandomFieldsExcludeFilter() { + return s -> s.contains("tokens."); + } + + @Override + protected AnalyzeResponse doParseInstance(XContentParser parser) throws IOException { + return AnalyzeResponse.fromXContent(parser); + } + + @Override + protected AnalyzeResponse createBlankInstance() { + return new AnalyzeResponse(); + } + + @Override + protected AnalyzeResponse createTestInstance() { + int tokenCount = randomIntBetween(1, 30); + AnalyzeResponse.AnalyzeToken[] tokens = new AnalyzeResponse.AnalyzeToken[tokenCount]; + for (int i = 0; i < tokenCount; i++) { + tokens[i] = randomToken(); + } + DetailAnalyzeResponse dar = null; + if (randomBoolean()) { + dar = new DetailAnalyzeResponse(); + if (randomBoolean()) { + dar.charfilters(new DetailAnalyzeResponse.CharFilteredText[]{ + new DetailAnalyzeResponse.CharFilteredText("my_charfilter", new String[]{"one two"}) + }); + } + dar.tokenizer(new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenizer", tokens)); + if (randomBoolean()) { + dar.tokenfilters(new DetailAnalyzeResponse.AnalyzeTokenList[]{ + new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenfilter_1", tokens), + new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenfilter_2", tokens) + }); + } + return new AnalyzeResponse(null, dar); + } + return new AnalyzeResponse(Arrays.asList(tokens), null); + } + + private AnalyzeResponse.AnalyzeToken randomToken() { + String token = randomAlphaOfLengthBetween(1, 20); + int position = randomIntBetween(0, 1000); + int startOffset = randomIntBetween(0, 1000); + int endOffset = randomIntBetween(0, 1000); + int posLength = randomIntBetween(1, 5); + String type = randomAlphaOfLengthBetween(1, 20); + Map<String, Object> extras = new HashMap<>(); + if (randomBoolean()) { + int entryCount = randomInt(6); + for (int i = 0; i < entryCount; i++) { + switch (randomInt(6)) { + case 0: + case 1: + case 2: + case 3: + String key = randomAlphaOfLength(5); + String value = randomAlphaOfLength(10); + extras.put(key, value); + break; + case 4: + String objkey = randomAlphaOfLength(5); + Map<String, Object> obj = new HashMap<>(); + obj.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + extras.put(objkey, obj); + break; + case 5: + String listkey = randomAlphaOfLength(5); + List<String> list = new ArrayList<>(); + list.add(randomAlphaOfLength(4)); + list.add(randomAlphaOfLength(6)); + extras.put(listkey, list); + break; + } + } + } + return new AnalyzeResponse.AnalyzeToken(token, position, startOffset, endOffset, posLength, type, extras); + } +} From 396c5780667121266a2e609383ff2d2d8111d469 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Tue, 3 Jul 2018 08:08:40 -0700 Subject: [PATCH 22/36] Fix coerce validation_method in GeoBoundingBoxQueryBuilder (#31747) The Rectangle constructor validates bounds before coerce has a chance to normalize coordinates so it cannot be used as intermediate storage. This commit removes the Rectangle as intermediate storage for the bounding box coordinates. 
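For illustration, a minimal sketch of the failure mode, reusing the coordinates
from the new testHonorsCoercion test below (Rectangle semantics as described above):

    // old: eager construction rejects a longitude of 181 before COERCE can wrap it
    Rectangle bbox = new Rectangle(-19.6, -15.5, 176.5, 181.0);
    // new: plain doubles carry the corners (bottom, top, left, right),
    // so the configured validation method can decide later
    double[] corners = new double[]{-19.6, -15.5, 176.5, 181.0};
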
Fixes #31718 --- .../query/GeoBoundingBoxQueryBuilder.java | 16 +++++++++------ .../GeoBoundingBoxQueryBuilderTests.java | 20 +++++++++++++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 3fd325afe0914..d26973e390ac3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -389,7 +389,8 @@ public static GeoBoundingBoxQueryBuilder fromXContent(XContentParser parser) thr GeoValidationMethod validationMethod = null; boolean ignoreUnmapped = DEFAULT_IGNORE_UNMAPPED; - Rectangle bbox = null; + // bottom (minLat), top (maxLat), left (minLon), right (maxLon) + double[] bbox = null; String type = "memory"; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -424,8 +425,8 @@ public static GeoBoundingBoxQueryBuilder fromXContent(XContentParser parser) thr throw new ElasticsearchParseException("failed to parse [{}] query. bounding box not provided", NAME); } - final GeoPoint topLeft = new GeoPoint(bbox.maxLat, bbox.minLon); //just keep the object - final GeoPoint bottomRight = new GeoPoint(bbox.minLat, bbox.maxLon); + final GeoPoint topLeft = new GeoPoint(bbox[1], bbox[2]); + final GeoPoint bottomRight = new GeoPoint(bbox[0], bbox[3]); GeoBoundingBoxQueryBuilder builder = new GeoBoundingBoxQueryBuilder(fieldName); builder.setCorners(topLeft, bottomRight); @@ -460,7 +461,10 @@ public String getWriteableName() { return NAME; } - public static Rectangle parseBoundingBox(XContentParser parser) throws IOException, ElasticsearchParseException { + /** + * Parses the bounding box and returns bottom, top, left, right coordinates + */ + public static double[] parseBoundingBox(XContentParser parser) throws IOException, ElasticsearchParseException { XContentParser.Token token = parser.currentToken(); if (token != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("failed to parse bounding box. 
Expected start object but found [{}]", token); @@ -521,8 +525,8 @@ public static Rectangle parseBoundingBox(XContentParser parser) throws IOExcepti + "using well-known text and explicit corners."); } org.locationtech.spatial4j.shape.Rectangle r = envelope.build(); - return new Rectangle(r.getMinY(), r.getMaxY(), r.getMinX(), r.getMaxX()); + return new double[]{r.getMinY(), r.getMaxY(), r.getMinX(), r.getMaxX()}; } - return new Rectangle(bottom, top, left, right); + return new double[]{bottom, top, left, right}; } } diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 0f17609ceeee8..d1f7972c7f441 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -509,6 +509,26 @@ public void testMalformedGeohashes() { assertThat(e1.getMessage(), containsString("Conflicting definition found using well-known text and explicit corners.")); } + public void testHonorsCoercion() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"geo_bounding_box\": {\n" + + " \"validation_method\": \"COERCE\",\n" + + " \"" + GEO_POINT_FIELD_NAME + "\": {\n" + + " \"top_left\": {\n" + + " \"lat\": -15.5,\n" + + " \"lon\": 176.5\n" + + " },\n" + + " \"bottom_right\": {\n" + + " \"lat\": -19.6,\n" + + " \"lon\": 181\n" + + " }\n" + + " }\n" + + " }\n" + + "}\n"; + assertGeoBoundingBoxQuery(query); + } + @Override public void testMustRewrite() throws IOException { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); From c0056cddd8ab0317757d41ee1fa651b0c95617c5 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Tue, 3 Jul 2018 10:40:56 -0500 Subject: [PATCH 23/36] ingest: Introduction of a bytes processor (#31733) ingest: Introduction of a bytes processor This processor allows for human readable byte values (e.g. 1kb) to be converted to their value in bytes (e.g. 1024). Internally this processor re-uses "ByteSizeValue.parseBytesSizeValue", which supports conversions up to Long.MAX_VALUE and the following units: "b", "kb", "mb", "gb", "tb", "pb". This change also introduces a generic return type for the AbstractStringProcessor to allow for code reuse while supporting a String -> T conversion (String -> Long in this case).
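As a minimal sketch of the conversion this processor performs (the field name "my_field" is purely illustrative), the underlying call is the same parseBytesSizeValue used by the implementation below:

    import org.elasticsearch.common.unit.ByteSizeValue;

    long bytes = ByteSizeValue.parseBytesSizeValue("1kb", null, "my_field").getBytes();
    assert bytes == 1024L;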
--- docs/reference/ingest/ingest-node.asciidoc | 27 +++++ .../common/AbstractStringProcessor.java | 8 +- .../ingest/common/BytesProcessor.java | 60 ++++++++++++ .../ingest/common/IngestCommonPlugin.java | 1 + .../AbstractStringProcessorTestCase.java | 12 ++- .../common/BytesProcessorFactoryTests.java | 27 +++++ .../ingest/common/BytesProcessorTests.java | 98 +++++++++++++++++++ .../rest-api-spec/test/ingest/10_basic.yml | 1 + .../test/ingest/180_bytes_processor.yml | 42 ++++++++ 9 files changed, 269 insertions(+), 7 deletions(-) create mode 100644 modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java create mode 100644 modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorFactoryTests.java create mode 100644 modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java create mode 100644 modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/180_bytes_processor.yml diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 29ff039950925..c8c459312a5da 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -773,6 +773,33 @@ Accepts a single value or an array of values. -------------------------------------------------- // NOTCONSOLE +[[bytes-processor]] +=== Bytes Processor +Converts a human readable byte value (e.g. 1kb) to its value in bytes (e.g. 1024). + +Supported human readable units are "b", "kb", "mb", "gb", "tb", "pb" case insensitive. An error will occur if +the field is not a supported format or resultant value exceeds 2^63. + +[[bytes-options]] +.Bytes Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to convert +| `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place +| `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +|====== + +[source,js] +-------------------------------------------------- +{ + "bytes": { + "field": "foo" + } +} +-------------------------------------------------- +// NOTCONSOLE + [[convert-processor]] === Convert Processor Converts an existing field's value to a different type, such as converting a string to an integer. diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java index 9c163290757bd..23c98ca1e0c0e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java @@ -27,10 +27,12 @@ import java.util.Map; /** - * Base class for processors that manipulate strings and require a single "fields" array config value, which + * Base class for processors that manipulate source strings and require a single "fields" array config value, which * holds a list of field names in string format. 
+ * + * @param <T> The resultant type for the target field */ -abstract class AbstractStringProcessor extends AbstractProcessor { +abstract class AbstractStringProcessor<T> extends AbstractProcessor { private final String field; private final boolean ignoreMissing; private final String targetField; @@ -67,7 +69,7 @@ public final void execute(IngestDocument document) { document.setFieldValue(targetField, process(val)); } - protected abstract String process(String value); + protected abstract T process(String value); abstract static class Factory implements Processor.Factory { final String processorType; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java new file mode 100644 index 0000000000000..dfe9a054acf07 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.util.Map; + +/** + * Processor that converts the content of string fields to the byte value.
+ * Throws exception is the field is not of type string or can not convert to the numeric byte value + */ +public final class BytesProcessor extends AbstractStringProcessor { + + public static final String TYPE = "bytes"; + + BytesProcessor(String processorTag, String field, boolean ignoreMissing, String targetField) { + super(processorTag, field, ignoreMissing, targetField); + } + + @Override + protected Long process(String value) { + return ByteSizeValue.parseBytesSizeValue(value, null, getField()).getBytes(); + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory extends AbstractStringProcessor.Factory { + + public Factory() { + super(TYPE); + } + + @Override + protected BytesProcessor newProcessor(String tag, Map config, String field, + boolean ignoreMissing, String targetField) { + return new BytesProcessor(tag, field, ignoreMissing, targetField); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index 591060098b728..bc900d325104a 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -81,6 +81,7 @@ public Map getProcessors(Processor.Parameters paramet processors.put(JsonProcessor.TYPE, new JsonProcessor.Factory()); processors.put(KeyValueProcessor.TYPE, new KeyValueProcessor.Factory()); processors.put(URLDecodeProcessor.TYPE, new URLDecodeProcessor.Factory()); + processors.put(BytesProcessor.TYPE, new BytesProcessor.Factory()); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java index 9d37f27bb33e5..4e4182bfdc891 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java @@ -31,7 +31,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public abstract class AbstractStringProcessorTestCase extends ESTestCase { +public abstract class AbstractStringProcessorTestCase extends ESTestCase { protected abstract AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField); @@ -39,7 +39,11 @@ protected String modifyInput(String input) { return input; } - protected abstract String expectedResult(String input); + protected abstract T expectedResult(String input); + + protected Class expectedResultType(){ + return (Class) String.class; // most results types are Strings + } public void testProcessor() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); @@ -47,7 +51,7 @@ public void testProcessor() throws Exception { String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, modifyInput(fieldValue)); Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(expectedResult(fieldValue))); + assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), 
equalTo(expectedResult(fieldValue))); } public void testFieldNotFound() throws Exception { @@ -109,6 +113,6 @@ public void testTargetField() throws Exception { String targetFieldName = fieldName + "foo"; Processor processor = newProcessor(fieldName, randomBoolean(), targetFieldName); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(targetFieldName, String.class), equalTo(expectedResult(fieldValue))); + assertThat(ingestDocument.getFieldValue(targetFieldName, expectedResultType()), equalTo(expectedResult(fieldValue))); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorFactoryTests.java new file mode 100644 index 0000000000000..10050240310e4 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorFactoryTests.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest.common; + +public class BytesProcessorFactoryTests extends AbstractStringProcessorFactoryTestCase { + @Override + protected AbstractStringProcessor.Factory newFactory() { + return new BytesProcessor.Factory(); + } +} diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java new file mode 100644 index 0000000000000..0da3434adf178 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest.common; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.ingest.RandomDocumentPicks; +import org.hamcrest.CoreMatchers; + +import static org.hamcrest.Matchers.equalTo; + +public class BytesProcessorTests extends AbstractStringProcessorTestCase { + + private String modifiedInput; + + @Override + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + return new BytesProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); + } + + @Override + protected String modifyInput(String input) { + //largest value that allows all results < Long.MAX_VALUE bytes + long randomNumber = randomLongBetween(1, Long.MAX_VALUE / ByteSizeUnit.PB.toBytes(1)); + ByteSizeUnit randomUnit = randomFrom(ByteSizeUnit.values()); + modifiedInput = randomNumber + randomUnit.getSuffix(); + return modifiedInput; + } + + @Override + protected Long expectedResult(String input) { + return ByteSizeValue.parseBytesSizeValue(modifiedInput, null, "").getBytes(); + } + + @Override + protected Class expectedResultType() { + return Long.class; + } + + public void testTooLarge() { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "8912pb"); + Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> processor.execute(ingestDocument)); + assertThat(exception.getMessage(), + CoreMatchers.equalTo("failed to parse setting [" + fieldName + "] with value [8912pb] as a size in bytes")); + assertThat(exception.getCause().getMessage(), + CoreMatchers.containsString("Values greater than 9223372036854775807 bytes are not supported")); + } + + public void testNotBytes() { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "junk"); + Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> processor.execute(ingestDocument)); + assertThat(exception.getMessage(), + CoreMatchers.equalTo("failed to parse [junk]")); + } + + public void testMissingUnits() { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "1"); + Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> processor.execute(ingestDocument)); + assertThat(exception.getMessage(), + CoreMatchers.containsString("unit is missing or unrecognized")); + } + + public void testFractional() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "1.1kb"); + Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); + processor.execute(ingestDocument); + assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1126L)); + assertWarnings("Fractional bytes 
values are deprecated. Use non-fractional bytes values instead: [1.1kb] found for setting " + + "[" + fieldName + "]"); + } +} diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml index 12efaa9570372..86557946ac0dd 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/10_basic.yml @@ -30,3 +30,4 @@ - contains: { nodes.$master.ingest.processors: { type: split } } - contains: { nodes.$master.ingest.processors: { type: trim } } - contains: { nodes.$master.ingest.processors: { type: uppercase } } + - contains: { nodes.$master.ingest.processors: { type: bytes } } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/180_bytes_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/180_bytes_processor.yml new file mode 100644 index 0000000000000..bc48720966c5f --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/180_bytes_processor.yml @@ -0,0 +1,42 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test bytes processor": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {bytes_source_field: "1kb"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.bytes_target_field: 1024 } + From dc869aa14993e29e080bfa3bc0ff75a057c3af92 Mon Sep 17 00:00:00 2001 From: Sohaib Iftikhar Date: Tue, 3 Jul 2018 18:13:28 +0200 Subject: [PATCH 24/36] Build: re-enabled bwc (#31769) -- It was disabled by #31675 --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 67128426eebd0..a6b7b4ec6fb63 100644 --- a/build.gradle +++ b/build.gradle @@ -170,8 +170,8 @@ task verifyVersions { * the enabled state of every bwc task. It should be set back to true * after the backport of the backcompat code is complete. */ -final boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/31675" /* place a PR link here when commiting bwc changes */ +final boolean bwc_tests_enabled = true +final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") From e65115ae5ae0e07af986ef109bb3fde5af644b47 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Tue, 3 Jul 2018 11:29:39 -0500 Subject: [PATCH 25/36] Consolidate watcher setting update registration (#31762) Previously the call to register a listener for settings updates was in each individual service, rather than in the notification service itself. This change ensures that each child of the notification service gets registered with the settings update consumer. 
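As an illustrative sketch of the resulting pattern (FooService and FooAccount are hypothetical; the real changes to EmailService and friends follow below), a child service now only forwards its settings to the base class:

    // hypothetical subclass: the settings update consumer is registered once in super(...)
    public class FooService extends NotificationService<FooAccount> {
        public FooService(Settings settings, ClusterSettings clusterSettings) {
            super(settings, "foo", clusterSettings, FooService.getSettings()); // getSettings() assumed, mirroring EmailService
        }

        @Override
        protected FooAccount createAccount(String name, Settings accountSettings) {
            return new FooAccount(name, accountSettings); // hypothetical account type
        }
    }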
--- .../watcher/notification/NotificationService.java | 12 +++++++++++- .../watcher/notification/email/EmailService.java | 3 +-- .../watcher/notification/hipchat/HipChatService.java | 3 +-- .../xpack/watcher/notification/jira/JiraService.java | 3 +-- .../notification/pagerduty/PagerDutyService.java | 2 +- .../watcher/notification/slack/SlackService.java | 3 +-- .../notification/NotificationServiceTests.java | 4 ++-- 7 files changed, 18 insertions(+), 12 deletions(-) rename x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/{ => watcher}/notification/NotificationServiceTests.java (98%) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java index 9870bcd086534..88399d3cb93d8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java @@ -7,11 +7,14 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.BiFunction; @@ -25,7 +28,14 @@ public abstract class NotificationService extends AbstractComponent { private Map accounts; private Account defaultAccount; - public NotificationService(Settings settings, String type) { + public NotificationService(Settings settings, String type, + ClusterSettings clusterSettings, List> pluginSettings) { + this(settings, type); + clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, pluginSettings); + } + + // Used for testing only + NotificationService(Settings settings, String type) { super(settings); this.type = type; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java index 41a2ecc3bcc80..e0687ee5d6316 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java @@ -94,9 +94,8 @@ public class EmailService extends NotificationService { private final CryptoService cryptoService; public EmailService(Settings settings, @Nullable CryptoService cryptoService, ClusterSettings clusterSettings) { - super(settings, "email"); + super(settings, "email", clusterSettings, EmailService.getSettings()); this.cryptoService = cryptoService; - clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); // ensure logging of setting changes clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java index 477b4545294bd..ebbcdd5662c20 
100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/hipchat/HipChatService.java @@ -65,9 +65,8 @@ public class HipChatService extends NotificationService { private HipChatServer defaultServer; public HipChatService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "hipchat"); + super(settings, "hipchat", clusterSettings, HipChatService.getSettings()); this.httpClient = httpClient; - clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); // ensure logging of setting changes clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_HOST, (s) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java index 297531cbe81ca..ad9652ae2083d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/jira/JiraService.java @@ -60,9 +60,8 @@ public class JiraService extends NotificationService { private final HttpClient httpClient; public JiraService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "jira"); + super(settings, "jira", clusterSettings, JiraService.getSettings()); this.httpClient = httpClient; - clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); // ensure logging of setting changes clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_ALLOW_HTTP, (s, o) -> {}, (s, o) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java index e74e78707beff..51be23d5b63ea 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/PagerDutyService.java @@ -39,7 +39,7 @@ public class PagerDutyService extends NotificationService { private final HttpClient httpClient; public PagerDutyService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "pagerduty"); + super(settings, "pagerduty", clusterSettings, PagerDutyService.getSettings()); this.httpClient = httpClient; clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_SERVICE_API_KEY, (s, o) -> {}, (s, o) -> {}); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java index 92f44bfcbe39b..c784be1d4f021 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/SlackService.java @@ -39,9 +39,8 @@ public class SlackService 
extends NotificationService { private final HttpClient httpClient; public SlackService(Settings settings, HttpClient httpClient, ClusterSettings clusterSettings) { - super(settings, "slack"); + super(settings, "slack", clusterSettings, SlackService.getSettings()); this.httpClient = httpClient; - clusterSettings.addSettingsUpdateConsumer(this::setAccountSetting, getSettings()); clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_URL, (s, o) -> {}, (s, o) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_URL_SECURE, (s, o) -> {}, (s, o) -> {}); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/notification/NotificationServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java similarity index 98% rename from x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/notification/NotificationServiceTests.java rename to x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java index bb5f234ca950a..829337e9acb7a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/notification/NotificationServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.notification; +package org.elasticsearch.xpack.watcher.notification; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; @@ -90,4 +90,4 @@ protected String createAccount(String name, Settings accountSettings) { return name; } } -} \ No newline at end of file +} From ed41d4f5661264ce7d9fb8fcdd21bc9a5a722fd2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 3 Jul 2018 18:55:23 +0200 Subject: [PATCH 26/36] Fix not waiting for Netty ThreadDeathWatcher in IT (#31758) Same problem and solution as in #30763 Fixes #30547 --- .../SmokeTestMonitoringWithSecurityIT.java | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java index 6c999ca2a7291..c427d8bf32c86 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java +++ b/x-pack/qa/smoke-test-plugins-ssl/src/test/java/org/elasticsearch/smoketest/SmokeTestMonitoringWithSecurityIT.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.smoketest; +import io.netty.util.ThreadDeathWatcher; +import io.netty.util.concurrent.GlobalEventExecutor; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.common.network.NetworkAddress; @@ -19,12 +21,15 @@ import org.elasticsearch.xpack.core.security.SecurityField; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.ExternalResource; import java.net.InetSocketAddress; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.concurrent.TimeUnit; import 
static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -42,6 +47,36 @@ * indexed in the cluster. */ public class SmokeTestMonitoringWithSecurityIT extends ESIntegTestCase { + + /** + * A JUnit class level rule that runs after the AfterClass method in {@link ESIntegTestCase}, + * which stops the cluster. After the cluster is stopped, there are a few netty threads that + * can linger, so we wait for them to finish; otherwise these lingering threads can intermittently + * trigger the thread leak detector + */ + @ClassRule + public static final ExternalResource STOP_NETTY_RESOURCE = new ExternalResource() { + @Override + protected void after() { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + }; + private static final String USER = "test_user"; private static final String PASS = "x-pack-test-password"; private static final String MONITORING_PATTERN = ".monitoring-*"; From a02e5ee740f663601e9cea32a785ef252d28a6b8 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Tue, 3 Jul 2018 13:31:56 -0700 Subject: [PATCH 27/36] Painless: Complete Removal of Painless Type (#31699) This completes the removal of Painless Type. The new data structures in the definition are a map of names (String) to Java Classes and a map of Java Classes to Painless Structs. The names to Java Classes map can contain a 2 to 1 ratio of names to classes depending on whether or not a short (imported) name is used. The Java Classes to Painless Structs map is always 1 to 1, where the Java Class name must match the Painless Struct name. This should lead to a significantly simpler type system in Painless moving forward, since the Painless Type only held redundant information given that Painless does not support generics.
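For context, a rough sketch of the two-map layout described above (map and variable names are assumptions for illustration, not the exact fields in Definition):

    Map<String, Class<?>> namesToClasses = new HashMap<>();
    Map<Class<?>, Struct> classesToStructs = new HashMap<>();

    // both the fully qualified and the short (imported) name may resolve to
    // the same class, hence up to a 2 to 1 ratio of names to classes
    namesToClasses.put("java.util.ArrayList", ArrayList.class);
    namesToClasses.put("ArrayList", ArrayList.class);

    // classes map to structs strictly one to one, with matching names;
    // arrayListStruct stands in for the whitelisted Struct of ArrayList
    classesToStructs.put(ArrayList.class, arrayListStruct);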
--- .../src/main/antlr/PainlessLexer.g4 | 6 +- .../java/org/elasticsearch/painless/Def.java | 36 +- .../elasticsearch/painless/DefBootstrap.java | 60 +-- .../org/elasticsearch/painless/DefMath.java | 328 +++++++-------- .../elasticsearch/painless/Definition.java | 381 +++++------------- .../elasticsearch/painless/FunctionRef.java | 6 +- .../painless/PainlessExplainError.java | 2 +- .../painless/PainlessPlugin.java | 2 +- .../painless/ScriptClassInfo.java | 18 +- .../painless/antlr/EnhancedPainlessLexer.java | 4 +- .../painless/antlr/PainlessLexer.java | 89 ++-- .../painless/antlr/PainlessParser.java | 143 ++++--- .../elasticsearch/painless/antlr/Walker.java | 7 +- .../painless/node/EExplicit.java | 3 +- .../painless/node/EFunctionRef.java | 2 +- .../painless/node/EInstanceof.java | 2 +- .../elasticsearch/painless/node/ELambda.java | 2 +- .../painless/node/EListInit.java | 4 +- .../elasticsearch/painless/node/EMapInit.java | 4 +- .../painless/node/ENewArray.java | 19 +- .../elasticsearch/painless/node/ENewObj.java | 4 +- .../elasticsearch/painless/node/ERegex.java | 2 +- .../elasticsearch/painless/node/EStatic.java | 3 +- .../elasticsearch/painless/node/PBrace.java | 4 +- .../painless/node/PCallInvoke.java | 4 +- .../elasticsearch/painless/node/PField.java | 2 +- .../painless/node/PSubDefCall.java | 1 - .../elasticsearch/painless/node/SCatch.java | 3 +- .../painless/node/SDeclaration.java | 3 +- .../elasticsearch/painless/node/SEach.java | 2 +- .../painless/node/SFunction.java | 4 +- .../painless/node/SSubEachIterable.java | 6 +- .../painless/DefBootstrapTests.java | 4 +- .../painless/PainlessDocGenerator.java | 62 ++- .../painless/node/NodeToStringTests.java | 24 +- 35 files changed, 541 insertions(+), 705 deletions(-) diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 index 6ab6a86113595..fe58984fa8712 100644 --- a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -19,14 +19,14 @@ lexer grammar PainlessLexer; -@members{ +@members { /** * Check against the current whitelist to determine whether a token is a type * or not. Called by the {@code TYPE} token defined in {@code PainlessLexer.g4}. * See also * The lexer hack. */ -protected abstract boolean isSimpleType(String name); +protected abstract boolean isType(String name); /** * Is the preceding {@code /} a the beginning of a regex (true) or a division @@ -133,7 +133,7 @@ NULL: 'null'; // or not. Note this works by processing one character at a time // and the rule is added or removed as this happens. This is also known // as "the lexer hack." See (https://en.wikipedia.org/wiki/The_lexer_hack). 
-TYPE: ID ( DOT ID )* { isSimpleType(getText()) }?; +TYPE: ID ( DOT ID )* { isType(getText()) }?; ID: [_a-zA-Z] [_a-zA-Z0-9]*; mode AFTER_DOT; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index 988a31a24ee27..8694ff7903859 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -185,7 +185,7 @@ static Method lookupMethodInternal(Definition definition, Class receiverClass Definition.MethodKey key = new Definition.MethodKey(name, arity); // check whitelist for matching method for (Class clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) { - Struct struct = definition.RuntimeClassToStruct(clazz); + Struct struct = definition.getPainlessStructFromJavaClass(clazz); if (struct != null) { Method method = struct.methods.get(key); @@ -195,7 +195,7 @@ static Method lookupMethodInternal(Definition definition, Class receiverClass } for (Class iface : clazz.getInterfaces()) { - struct = definition.RuntimeClassToStruct(iface); + struct = definition.getPainlessStructFromJavaClass(iface); if (struct != null) { Method method = struct.methods.get(key); @@ -279,7 +279,7 @@ static MethodHandle lookupMethod(Definition definition, Lookup lookup, MethodTyp captures[capture] = callSiteType.parameterType(i + 1 + capture); } MethodHandle filter; - Definition.Type interfaceType = definition.ClassToType(method.arguments.get(i - 1 - replaced)); + Class interfaceType = method.arguments.get(i - 1 - replaced); if (signature.charAt(0) == 'S') { // the implementation is strongly typed, now that we know the interface type, // we have everything. @@ -293,14 +293,14 @@ static MethodHandle lookupMethod(Definition definition, Lookup lookup, MethodTyp // the interface type is now known, but we need to get the implementation. // this is dynamically based on the receiver type (and cached separately, underneath // this cache). It won't blow up since we never nest here (just references) - MethodType nestedType = MethodType.methodType(interfaceType.clazz, captures); + MethodType nestedType = MethodType.methodType(interfaceType, captures); CallSite nested = DefBootstrap.bootstrap(definition, lookup, call, nestedType, 0, DefBootstrap.REFERENCE, - interfaceType.name); + Definition.ClassToName(interfaceType)); filter = nested.dynamicInvoker(); } else { throw new AssertionError(); @@ -324,8 +324,8 @@ static MethodHandle lookupMethod(Definition definition, Lookup lookup, MethodTyp */ static MethodHandle lookupReference(Definition definition, Lookup lookup, String interfaceClass, Class receiverClass, String name) throws Throwable { - Definition.Type interfaceType = definition.getType(interfaceClass); - Method interfaceMethod = interfaceType.struct.functionalMethod; + Class interfaceType = definition.getJavaClassFromPainlessType(interfaceClass); + Method interfaceMethod = definition.getPainlessStructFromJavaClass(interfaceType).functionalMethod; if (interfaceMethod == null) { throw new IllegalArgumentException("Class [" + interfaceClass + "] is not a functional interface"); } @@ -337,15 +337,15 @@ static MethodHandle lookupReference(Definition definition, Lookup lookup, String /** Returns a method handle to an implementation of clazz, given method reference signature. 
*/ private static MethodHandle lookupReferenceInternal(Definition definition, Lookup lookup, - Definition.Type clazz, String type, String call, Class... captures) + Class clazz, String type, String call, Class... captures) throws Throwable { final FunctionRef ref; if ("this".equals(type)) { // user written method - Method interfaceMethod = clazz.struct.functionalMethod; + Method interfaceMethod = definition.getPainlessStructFromJavaClass(clazz).functionalMethod; if (interfaceMethod == null) { throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " + - "to [" + clazz.name + "], not a functional interface"); + "to [" + Definition.ClassToName(clazz) + "], not a functional interface"); } int arity = interfaceMethod.arguments.size() + captures.length; final MethodHandle handle; @@ -359,14 +359,14 @@ private static MethodHandle lookupReferenceInternal(Definition definition, Looku // because the arity does not match the expected interface type. if (call.contains("$")) { throw new IllegalArgumentException("Incorrect number of parameters for [" + interfaceMethod.name + - "] in [" + clazz.clazz + "]"); + "] in [" + clazz + "]"); } throw new IllegalArgumentException("Unknown call [" + call + "] with [" + arity + "] arguments."); } - ref = new FunctionRef(clazz.clazz, interfaceMethod, call, handle.type(), captures.length); + ref = new FunctionRef(clazz, interfaceMethod, call, handle.type(), captures.length); } else { // whitelist lookup - ref = new FunctionRef(definition, clazz.clazz, type, call, captures.length); + ref = new FunctionRef(definition, clazz, type, call, captures.length); } final CallSite callSite = LambdaBootstrap.lambdaBootstrap( lookup, @@ -379,7 +379,7 @@ private static MethodHandle lookupReferenceInternal(Definition definition, Looku ref.delegateMethodType, ref.isDelegateInterface ? 1 : 0 ); - return callSite.dynamicInvoker().asType(MethodType.methodType(clazz.clazz, captures)); + return callSite.dynamicInvoker().asType(MethodType.methodType(clazz, captures)); } /** gets the field name used to lookup up the MethodHandle for a function. 
*/ @@ -416,7 +416,7 @@ public static String getUserFunctionHandleFieldName(String name, int arity) { static MethodHandle lookupGetter(Definition definition, Class receiverClass, String name) { // first try whitelist for (Class clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) { - Struct struct = definition.RuntimeClassToStruct(clazz); + Struct struct = definition.getPainlessStructFromJavaClass(clazz); if (struct != null) { MethodHandle handle = struct.getters.get(name); @@ -426,7 +426,7 @@ static MethodHandle lookupGetter(Definition definition, Class receiverClass, } for (final Class iface : clazz.getInterfaces()) { - struct = definition.RuntimeClassToStruct(iface); + struct = definition.getPainlessStructFromJavaClass(iface); if (struct != null) { MethodHandle handle = struct.getters.get(name); @@ -487,7 +487,7 @@ static MethodHandle lookupGetter(Definition definition, Class receiverClass, static MethodHandle lookupSetter(Definition definition, Class receiverClass, String name) { // first try whitelist for (Class clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) { - Struct struct = definition.RuntimeClassToStruct(clazz); + Struct struct = definition.getPainlessStructFromJavaClass(clazz); if (struct != null) { MethodHandle handle = struct.setters.get(name); @@ -497,7 +497,7 @@ static MethodHandle lookupSetter(Definition definition, Class receiverClass, } for (final Class iface : clazz.getInterfaces()) { - struct = definition.RuntimeClassToStruct(iface); + struct = definition.getPainlessStructFromJavaClass(iface); if (struct != null) { MethodHandle handle = struct.setters.get(name); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java index 31fba8f757954..9c7c7f631b68d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java @@ -1,7 +1,3 @@ -package org.elasticsearch.painless; - -import org.elasticsearch.common.SuppressForbidden; - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -21,6 +17,10 @@ * under the License. */ +package org.elasticsearch.painless; + +import org.elasticsearch.common.SuppressForbidden; + import java.lang.invoke.CallSite; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -72,16 +72,16 @@ private DefBootstrap() {} // no instance! public static final int SHIFT_OPERATOR = 9; /** static bootstrap parameter indicating a request to normalize an index for array-like-access */ public static final int INDEX_NORMALIZE = 10; - + // constants for the flags parameter of operators - /** - * static bootstrap parameter indicating the binary operator allows nulls (e.g. == and +) + /** + * static bootstrap parameter indicating the binary operator allows nulls (e.g. == and +) *
<p>
* requires additional {@link MethodHandles#catchException} guard, which will invoke * the fallback if a null is encountered. */ public static final int OPERATOR_ALLOWS_NULL = 1 << 0; - + /** * static bootstrap parameter indicating the binary operator is part of compound assignment (e.g. +=). *
<p>
@@ -89,7 +89,7 @@ private DefBootstrap() {} // no instance! * to cast back to the receiver's type, depending on types seen. */ public static final int OPERATOR_COMPOUND_ASSIGNMENT = 1 << 1; - + /** * static bootstrap parameter indicating an explicit cast to the return type. *
<p>
@@ -129,7 +129,7 @@ static final class PIC extends MutableCallSite { setTarget(fallback); } - + /** * guard method for inline caching: checks the receiver's class is the same * as the cached class @@ -162,7 +162,7 @@ private MethodHandle lookup(int flavor, String name, Class receiver) throws T default: throw new AssertionError(); } } - + /** * Creates the {@link MethodHandle} for the megamorphic call site * using {@link ClassValue} and {@link MethodHandles#exactInvoker(MethodType)}: @@ -182,7 +182,7 @@ protected MethodHandle computeValue(Class receiverType) { } }; return MethodHandles.foldArguments(MethodHandles.exactInvoker(type), - MEGAMORPHIC_LOOKUP.bindTo(megamorphicCache)); + MEGAMORPHIC_LOOKUP.bindTo(megamorphicCache)); } /** @@ -195,18 +195,18 @@ Object fallback(final Object[] callArgs) throws Throwable { if (depth >= MAX_DEPTH) { // we revert the whole cache and build a new megamorphic one final MethodHandle target = this.createMegamorphicHandle(); - + setTarget(target); - return target.invokeWithArguments(callArgs); + return target.invokeWithArguments(callArgs); } else { final Class receiver = callArgs[0].getClass(); final MethodHandle target = lookup(flavor, name, receiver).asType(type()); - + MethodHandle test = CHECK_CLASS.bindTo(receiver); MethodHandle guard = MethodHandles.guardWithTest(test, target, getTarget()); - + depth++; - + setTarget(guard); return target.invokeWithArguments(callArgs); } @@ -225,7 +225,7 @@ Object fallback(final Object[] callArgs) throws Throwable { MethodType.methodType(Object.class, Object[].class)); MethodHandle mh = publicLookup.findVirtual(ClassValue.class, "get", MethodType.methodType(Object.class, Class.class)); - mh = MethodHandles.filterArguments(mh, 1, + mh = MethodHandles.filterArguments(mh, 1, publicLookup.findVirtual(Object.class, "getClass", MethodType.methodType(Class.class))); MEGAMORPHIC_LOOKUP = mh.asType(mh.type().changeReturnType(MethodHandle.class)); } catch (ReflectiveOperationException e) { @@ -233,7 +233,7 @@ Object fallback(final Object[] callArgs) throws Throwable { } } } - + /** * CallSite that implements the monomorphic inlining cache (for operators). */ @@ -252,14 +252,14 @@ static final class MIC extends MutableCallSite { if (initialDepth > 0) { initialized = true; } - + MethodHandle fallback = FALLBACK.bindTo(this) .asCollector(Object[].class, type.parameterCount()) .asType(type); setTarget(fallback); } - + /** * Does a slow lookup for the operator */ @@ -290,7 +290,7 @@ private MethodHandle lookup(Object[] args) throws Throwable { default: throw new AssertionError(); } } - + private MethodHandle lookupGeneric() { MethodHandle target = DefMath.lookupGeneric(name); if ((flags & OPERATOR_EXPLICIT_CAST) != 0) { @@ -302,7 +302,7 @@ private MethodHandle lookupGeneric() { } return target; } - + /** * Called when a new type is encountered or if cached type does not match. * In that case we revert to a generic, but slower operator handling. @@ -315,7 +315,7 @@ Object fallback(Object[] args) throws Throwable { setTarget(generic.asType(type())); return generic.invokeWithArguments(args); } - + final MethodType type = type(); MethodHandle target = lookup(args); // for math operators: WrongMethodType can be confusing. convert into a ClassCastException if they screw up. @@ -361,18 +361,18 @@ Object fallback(Object[] args) throws Throwable { // very special cases, where even the receiver can be null (see JLS rules for string concat) // we wrap + with an NPE catcher, and use our generic method in that case. 
if (flavor == BINARY_OPERATOR && (flags & OPERATOR_ALLOWS_NULL) != 0) { - MethodHandle handler = MethodHandles.dropArguments(lookupGeneric().asType(type()), - 0, + MethodHandle handler = MethodHandles.dropArguments(lookupGeneric().asType(type()), + 0, NullPointerException.class); guard = MethodHandles.catchException(guard, NullPointerException.class, handler); } - + initialized = true; setTarget(guard); return target.invokeWithArguments(args); } - + /** * guard method for inline caching: checks the receiver's class is the same * as the cached class @@ -388,7 +388,7 @@ static boolean checkLHS(Class clazz, Object leftObject) { static boolean checkRHS(Class left, Class right, Object leftObject, Object rightObject) { return rightObject.getClass() == right; } - + /** * guard method for inline caching: checks the receiver's class and the first argument * are the same as the cached receiver and first argument. @@ -396,7 +396,7 @@ static boolean checkRHS(Class left, Class right, Object leftObject, Object static boolean checkBoth(Class left, Class right, Object leftObject, Object rightObject) { return leftObject.getClass() == left && rightObject.getClass() == right; } - + private static final MethodHandle CHECK_LHS; private static final MethodHandle CHECK_RHS; private static final MethodHandle CHECK_BOTH; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java index 6628484660699..f903c0571b2bd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java @@ -21,8 +21,8 @@ import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; -import java.lang.invoke.MethodType; import java.lang.invoke.MethodHandles.Lookup; +import java.lang.invoke.MethodType; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -31,38 +31,38 @@ import java.util.stream.Stream; /** - * Dynamic operators for painless. + * Dynamic operators for painless. *
<p>
* Each operator must "support" the following types: - * {@code int,long,float,double,boolean,Object}. Operators can throw exceptions if + * {@code int,long,float,double,boolean,Object}. Operators can throw exceptions if * the type is illegal. The {@code Object} type must be a "generic" handler that * handles all legal types: it must be convertible to every possible legal signature. */ @SuppressWarnings("unused") public class DefMath { - + // Unary not: only applicable to integral types private static int not(int v) { return ~v; } - + private static long not(long v) { return ~v; } - + private static float not(float v) { throw new ClassCastException("Cannot apply not [~] to type [float]"); } - + private static double not(double v) { throw new ClassCastException("Cannot apply not [~] to type [double]"); } - + private static boolean not(boolean v) { throw new ClassCastException("Cannot apply not [~] to type [boolean]"); } - + private static Object not(Object unary) { if (unary instanceof Long) { return ~(Long)unary; @@ -79,29 +79,29 @@ private static Object not(Object unary) { throw new ClassCastException("Cannot apply [~] operation to type " + "[" + unary.getClass().getCanonicalName() + "]."); } - + // unary negation and plus: applicable to all numeric types private static int neg(int v) { return -v; } - + private static long neg(long v) { return -v; } - + private static float neg(float v) { return -v; } - + private static double neg(double v) { return -v; } - + private static boolean neg(boolean v) { throw new ClassCastException("Cannot apply [-] operation to type [boolean]"); } - + private static Object neg(final Object unary) { if (unary instanceof Double) { return -(double)unary; @@ -122,27 +122,27 @@ private static Object neg(final Object unary) { throw new ClassCastException("Cannot apply [-] operation to type " + "[" + unary.getClass().getCanonicalName() + "]."); } - + private static int plus(int v) { return +v; } - + private static long plus(long v) { return +v; } - + private static float plus(float v) { return +v; } - + private static double plus(double v) { return +v; } - + private static boolean plus(boolean v) { throw new ClassCastException("Cannot apply [+] operation to type [boolean]"); } - + private static Object plus(final Object unary) { if (unary instanceof Double) { return +(double)unary; @@ -163,29 +163,29 @@ private static Object plus(final Object unary) { throw new ClassCastException("Cannot apply [+] operation to type " + "[" + unary.getClass().getCanonicalName() + "]."); } - + // multiplication/division/remainder/subtraction: applicable to all integer types - + private static int mul(int a, int b) { return a * b; } - + private static long mul(long a, long b) { return a * b; } - + private static float mul(float a, float b) { return a * b; } - + private static double mul(double a, double b) { return a * b; } - + private static boolean mul(boolean a, boolean b) { throw new ClassCastException("Cannot apply [*] operation to type [boolean]"); } - + private static Object mul(Object left, Object right) { if (left instanceof Number) { if (right instanceof Number) { @@ -228,27 +228,27 @@ private static Object mul(Object left, Object right) { throw new ClassCastException("Cannot apply [*] operation to types " + "[" + left.getClass().getCanonicalName() + "] and [" + right.getClass().getCanonicalName() + "]."); } - + private static int div(int a, int b) { return a / b; } - + private static long div(long a, long b) { return a / b; } - + private static float div(float a, float b) { 
return a / b; } - + private static double div(double a, double b) { return a / b; } - + private static boolean div(boolean a, boolean b) { throw new ClassCastException("Cannot apply [/] operation to type [boolean]"); } - + private static Object div(Object left, Object right) { if (left instanceof Number) { if (right instanceof Number) { @@ -291,27 +291,27 @@ private static Object div(Object left, Object right) { throw new ClassCastException("Cannot apply [/] operation to types " + "[" + left.getClass().getCanonicalName() + "] and [" + right.getClass().getCanonicalName() + "]."); } - + private static int rem(int a, int b) { return a % b; } - + private static long rem(long a, long b) { return a % b; } - + private static float rem(float a, float b) { return a % b; } - + private static double rem(double a, double b) { return a % b; } - + private static boolean rem(boolean a, boolean b) { throw new ClassCastException("Cannot apply [%] operation to type [boolean]"); } - + private static Object rem(Object left, Object right) { if (left instanceof Number) { if (right instanceof Number) { @@ -354,30 +354,30 @@ private static Object rem(Object left, Object right) { throw new ClassCastException("Cannot apply [%] operation to types " + "[" + left.getClass().getCanonicalName() + "] and [" + right.getClass().getCanonicalName() + "]."); } - + // addition: applicable to all numeric types. // additionally, if either type is a string, the other type can be any arbitrary type (including null) - + private static int add(int a, int b) { return a + b; } - + private static long add(long a, long b) { return a + b; } - + private static float add(float a, float b) { return a + b; } - + private static double add(double a, double b) { return a + b; } - + private static boolean add(boolean a, boolean b) { throw new ClassCastException("Cannot apply [+] operation to type [boolean]"); } - + private static Object add(Object left, Object right) { if (left instanceof String) { return (String) left + right; @@ -424,27 +424,27 @@ private static Object add(Object left, Object right) { throw new ClassCastException("Cannot apply [+] operation to types " + "[" + left.getClass().getCanonicalName() + "] and [" + right.getClass().getCanonicalName() + "]."); } - + private static int sub(int a, int b) { return a - b; } - + private static long sub(long a, long b) { return a - b; } - + private static float sub(float a, float b) { return a - b; } - + private static double sub(double a, double b) { return a - b; } - + private static boolean sub(boolean a, boolean b) { throw new ClassCastException("Cannot apply [-] operation to type [boolean]"); } - + private static Object sub(Object left, Object right) { if (left instanceof Number) { if (right instanceof Number) { @@ -487,29 +487,29 @@ private static Object sub(Object left, Object right) { throw new ClassCastException("Cannot apply [-] operation to types " + "[" + left.getClass().getCanonicalName() + "] and [" + right.getClass().getCanonicalName() + "]."); } - + // eq: applicable to any arbitrary type, including nulls for both arguments!!! 
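    // ------------------------------------------------------------------
    // (Editorial sketch, not part of this patch.) A tiny harness showing
    // the dispatch contract described in the class javadoc: the primitive
    // overloads above are bound statically by exact signature, while the
    // Object overloads promote at runtime. It sits inside DefMath only so
    // it can reach the private overloads; the method name is hypothetical,
    // and the expected results assume the usual JLS-style promotion.
    private static void editorialDemo() {
        assert add(1, 2) == 3;                               // int overload, bound statically
        assert add((Object) 1, (Object) 2.5d).equals(3.5d);  // Integer/Double promote to double math
        assert add((Object) "x=", (Object) 1).equals("x=1"); // either side a String -> concatenation
        assert lt((Object) (byte) 1, (Object) 2L);           // integral arguments promote for comparison
    }
    // The eq/lt/lte/gt/gte overloads below follow the same pattern.
    // ------------------------------------------------------------------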
private static boolean eq(int a, int b) { return a == b; } - + private static boolean eq(long a, long b) { return a == b; } - + private static boolean eq(float a, float b) { return a == b; } - + private static boolean eq(double a, double b) { return a == b; } - + private static boolean eq(boolean a, boolean b) { return a == b; } - + private static boolean eq(Object left, Object right) { if (left != null && right != null) { if (left instanceof Double) { @@ -565,29 +565,29 @@ private static boolean eq(Object left, Object right) { return left == null && right == null; } - + // comparison operators: applicable for any numeric type private static boolean lt(int a, int b) { return a < b; } - + private static boolean lt(long a, long b) { return a < b; } - + private static boolean lt(float a, float b) { return a < b; } - + private static boolean lt(double a, double b) { return a < b; } - + private static boolean lt(boolean a, boolean b) { - throw new ClassCastException("Cannot apply [<] operation to type [boolean]"); + throw new ClassCastException("Cannot apply [<] operation to type [boolean]"); } - + private static boolean lt(Object left, Object right) { if (left instanceof Number) { if (right instanceof Number) { @@ -634,23 +634,23 @@ private static boolean lt(Object left, Object right) { private static boolean lte(int a, int b) { return a <= b; } - + private static boolean lte(long a, long b) { return a <= b; } - + private static boolean lte(float a, float b) { return a <= b; } - + private static boolean lte(double a, double b) { return a <= b; } - + private static boolean lte(boolean a, boolean b) { - throw new ClassCastException("Cannot apply [<=] operation to type [boolean]"); + throw new ClassCastException("Cannot apply [<=] operation to type [boolean]"); } - + private static boolean lte(Object left, Object right) { if (left instanceof Number) { if (right instanceof Number) { @@ -697,23 +697,23 @@ private static boolean lte(Object left, Object right) { private static boolean gt(int a, int b) { return a > b; } - + private static boolean gt(long a, long b) { return a > b; } - + private static boolean gt(float a, float b) { return a > b; } - + private static boolean gt(double a, double b) { return a > b; } - + private static boolean gt(boolean a, boolean b) { - throw new ClassCastException("Cannot apply [>] operation to type [boolean]"); + throw new ClassCastException("Cannot apply [>] operation to type [boolean]"); } - + private static boolean gt(Object left, Object right) { if (left instanceof Number) { if (right instanceof Number) { @@ -756,25 +756,25 @@ private static boolean gt(Object left, Object right) { throw new ClassCastException("Cannot apply [>] operation to types " + "[" + left.getClass().getCanonicalName() + "] and [" + right.getClass().getCanonicalName() + "]."); } - + private static boolean gte(int a, int b) { return a >= b; } - + private static boolean gte(long a, long b) { return a >= b; } - + private static boolean gte(float a, float b) { return a >= b; } - + private static boolean gte(double a, double b) { return a >= b; } - + private static boolean gte(boolean a, boolean b) { - throw new ClassCastException("Cannot apply [>=] operation to type [boolean]"); + throw new ClassCastException("Cannot apply [>=] operation to type [boolean]"); } private static boolean gte(Object left, Object right) { @@ -819,10 +819,10 @@ private static boolean gte(Object left, Object right) { throw new ClassCastException("Cannot apply [>=] operation to types " + "[" +
left.getClass().getCanonicalName() + "] and [" + right.getClass().getCanonicalName() + "]."); } - + // helper methods to convert an integral according to numeric promotion // this is used by the generic code for bitwise and shift operators - + private static long longIntegralValue(Object o) { if (o instanceof Long) { return (long)o; @@ -834,7 +834,7 @@ private static long longIntegralValue(Object o) { throw new ClassCastException("Cannot convert [" + o.getClass().getCanonicalName() + "] to an integral value."); } } - + private static int intIntegralValue(Object o) { if (o instanceof Integer || o instanceof Short || o instanceof Byte) { return ((Number)o).intValue(); @@ -844,29 +844,29 @@ private static int intIntegralValue(Object o) { throw new ClassCastException("Cannot convert [" + o.getClass().getCanonicalName() + "] to an integral value."); } } - + // bitwise operators: valid only for integral types private static int and(int a, int b) { return a & b; } - + private static long and(long a, long b) { return a & b; } - + private static float and(float a, float b) { - throw new ClassCastException("Cannot apply [&] operation to type [float]"); + throw new ClassCastException("Cannot apply [&] operation to type [float]"); } - + private static double and(double a, double b) { - throw new ClassCastException("Cannot apply [&] operation to type [float]"); + throw new ClassCastException("Cannot apply [&] operation to type [double]"); } - + private static boolean and(boolean a, boolean b) { return a & b; } - + private static Object and(Object left, Object right) { if (left instanceof Boolean && right instanceof Boolean) { return (boolean)left & (boolean)right; @@ -876,23 +876,23 @@ private static Object and(Object left, Object right) { return intIntegralValue(left) & intIntegralValue(right); } } - + private static int xor(int a, int b) { return a ^ b; } - + private static long xor(long a, long b) { return a ^ b; } - + private static float xor(float a, float b) { - throw new ClassCastException("Cannot apply [^] operation to type [float]"); + throw new ClassCastException("Cannot apply [^] operation to type [float]"); } - + private static double xor(double a, double b) { - throw new ClassCastException("Cannot apply [^] operation to type [float]"); + throw new ClassCastException("Cannot apply [^] operation to type [double]"); } - + private static boolean xor(boolean a, boolean b) { return a ^ b; } @@ -910,23 +910,23 @@ private static Object xor(Object left, Object right) { private static int or(int a, int b) { return a | b; } - + private static long or(long a, long b) { return a | b; } - + private static float or(float a, float b) { - throw new ClassCastException("Cannot apply [|] operation to type [float]"); + throw new ClassCastException("Cannot apply [|] operation to type [float]"); } - + private static double or(double a, double b) { - throw new ClassCastException("Cannot apply [|] operation to type [float]"); + throw new ClassCastException("Cannot apply [|] operation to type [double]"); } - + private static boolean or(boolean a, boolean b) { return a | b; } - + private static Object or(Object left, Object right) { if (left instanceof Boolean && right instanceof Boolean) { return (boolean)left | (boolean)right; @@ -936,30 +936,30 @@ private static Object or(Object left, Object right) { return intIntegralValue(left) | intIntegralValue(right); } } - + // shift operators, valid for any integral types, but do not promote.
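    // (Editorial note, not part of this patch.) Concretely: the RHS is
    // always received as long, so a single generic (Object, long) overload
    // per operator covers every shift amount. For example, lsh((Object) 1, 3L)
    // below promotes the boxed Integer LHS with intIntegralValue and is
    // expected to return 8 as a boxed Integer.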
// we implement all shifts as long shifts, because the extra bits are ignored anyway. - + private static int lsh(int a, long b) { return a << b; } - + private static long lsh(long a, long b) { return a << b; } - + private static float lsh(float a, long b) { - throw new ClassCastException("Cannot apply [<<] operation to type [float]"); + throw new ClassCastException("Cannot apply [<<] operation to type [float]"); } - + private static double lsh(double a, long b) { - throw new ClassCastException("Cannot apply [<<] operation to type [double]"); + throw new ClassCastException("Cannot apply [<<] operation to type [double]"); } - + private static boolean lsh(boolean a, long b) { - throw new ClassCastException("Cannot apply [<<] operation to type [boolean]"); + throw new ClassCastException("Cannot apply [<<] operation to type [boolean]"); } - + public static Object lsh(Object left, long right) { if (left instanceof Long) { return (long)(left) << right; @@ -967,25 +967,25 @@ public static Object lsh(Object left, long right) { return intIntegralValue(left) << right; } } - + private static int rsh(int a, long b) { return a >> b; } - + private static long rsh(long a, long b) { return a >> b; } - + private static float rsh(float a, long b) { - throw new ClassCastException("Cannot apply [>>] operation to type [float]"); + throw new ClassCastException("Cannot apply [>>] operation to type [float]"); } - + private static double rsh(double a, long b) { - throw new ClassCastException("Cannot apply [>>] operation to type [double]"); + throw new ClassCastException("Cannot apply [>>] operation to type [double]"); } - + private static boolean rsh(boolean a, long b) { - throw new ClassCastException("Cannot apply [>>] operation to type [boolean]"); + throw new ClassCastException("Cannot apply [>>] operation to type [boolean]"); } public static Object rsh(Object left, long right) { @@ -995,25 +995,25 @@ public static Object rsh(Object left, long right) { return intIntegralValue(left) >> right; } } - + private static int ush(int a, long b) { return a >>> b; } - + private static long ush(long a, long b) { return a >>> b; } - + private static float ush(float a, long b) { - throw new ClassCastException("Cannot apply [>>>] operation to type [float]"); + throw new ClassCastException("Cannot apply [>>>] operation to type [float]"); } - + private static double ush(double a, long b) { - throw new ClassCastException("Cannot apply [>>>] operation to type [double]"); + throw new ClassCastException("Cannot apply [>>>] operation to type [double]"); } - + private static boolean ush(boolean a, long b) { - throw new ClassCastException("Cannot apply [>>>] operation to type [boolean]"); + throw new ClassCastException("Cannot apply [>>>] operation to type [boolean]"); } public static Object ush(Object left, long right) { @@ -1023,15 +1023,15 @@ public static Object ush(Object left, long right) { return intIntegralValue(left) >>> right; } } - - /** - * unboxes a class to its primitive type, or returns the original + + /** + * unboxes a class to its primitive type, or returns the original * class if its not a boxed type. */ private static Class unbox(Class clazz) { return MethodType.methodType(clazz).unwrap().returnType(); } - + /** Unary promotion. All Objects are promoted to Object. */ private static Class promote(Class clazz) { // if either is a non-primitive type -> Object. 
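        // (Editorial note, not part of this patch.) Unary promotion here
        // mirrors JLS numeric promotion: byte/short/char/int promote to int,
        // long/float/double stay as they are, and anything non-primitive
        // becomes Object. So promote(byte.class) == int.class while
        // promote(Float.class) == Object.class; the lookup methods below
        // always unbox first, so a boxed Float receiver still resolves to
        // the float handles via promote(unbox(receiverClass)).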
@@ -1039,25 +1039,25 @@ private static Class promote(Class clazz) { return Object.class; } // always promoted to integer - if (clazz == byte.class || clazz == short.class || clazz == char.class || clazz == int.class) { - return int.class; - } else { - return clazz; - } + if (clazz == byte.class || clazz == short.class || clazz == char.class || clazz == int.class) { + return int.class; + } else { + return clazz; + } } - + /** Binary promotion. */ private static Class promote(Class a, Class b) { // if either is a non-primitive type -> Object. if (a.isPrimitive() == false || b.isPrimitive() == false) { return Object.class; } - + // boolean -> boolean if (a == boolean.class && b == boolean.class) { return boolean.class; } - + // ordinary numeric promotion if (a == double.class || b == double.class) { return double.class; @@ -1069,7 +1069,7 @@ private static Class promote(Class a, Class b) { return int.class; } } - + private static final Lookup PRIV_LOOKUP = MethodHandles.lookup(); private static final Map,Map> TYPE_OP_MAPPING = Collections.unmodifiableMap( @@ -1107,7 +1107,7 @@ private static Class promote(Class a, Class b) { } })) ); - + /** Returns an appropriate method handle for a unary or shift operator, based only on the receiver (LHS) */ public static MethodHandle lookupUnary(Class receiverClass, String name) { MethodHandle handle = TYPE_OP_MAPPING.get(promote(unbox(receiverClass))).get(name); @@ -1116,7 +1116,7 @@ public static MethodHandle lookupUnary(Class receiverClass, String name) { } return handle; } - + /** Returns an appropriate method handle for a binary operator, based on promotion of the LHS and RHS arguments */ public static MethodHandle lookupBinary(Class classA, Class classB, String name) { MethodHandle handle = TYPE_OP_MAPPING.get(promote(promote(unbox(classA)), promote(unbox(classB)))).get(name); @@ -1125,7 +1125,7 @@ public static MethodHandle lookupBinary(Class classA, Class classB, String } return handle; } - + /** Returns a generic method handle for any operator, that can handle all valid signatures, nulls, corner cases */ public static MethodHandle lookupGeneric(String name) { return TYPE_OP_MAPPING.get(Object.class).get(name); @@ -1143,7 +1143,7 @@ static Object dynamicReceiverCast(Object returnValue, Object lhs) { return returnValue; } } - + /** * Slow dynamic cast: casts {@code value} to an instance of {@code clazz} * based upon inspection. If {@code lhs} is null, no cast takes place. @@ -1173,7 +1173,7 @@ static Object dynamicCast(Class clazz, Object value) { return value; } } - + /** Slowly returns a Number for o. 
Just for supporting dynamicCast */ static Number getNumber(Object o) { if (o instanceof Number) { @@ -1184,17 +1184,17 @@ static Number getNumber(Object o) { throw new ClassCastException("Cannot convert [" + o.getClass() + "] to a Number"); } } - + private static final MethodHandle DYNAMIC_CAST; private static final MethodHandle DYNAMIC_RECEIVER_CAST; static { final Lookup lookup = MethodHandles.lookup(); try { - DYNAMIC_CAST = lookup.findStatic(lookup.lookupClass(), - "dynamicCast", + DYNAMIC_CAST = lookup.findStatic(lookup.lookupClass(), + "dynamicCast", MethodType.methodType(Object.class, Class.class, Object.class)); - DYNAMIC_RECEIVER_CAST = lookup.findStatic(lookup.lookupClass(), - "dynamicReceiverCast", + DYNAMIC_RECEIVER_CAST = lookup.findStatic(lookup.lookupClass(), + "dynamicReceiverCast", MethodType.methodType(Object.class, Object.class, Object.class)); } catch (ReflectiveOperationException e) { throw new AssertionError(e); @@ -1204,7 +1204,7 @@ static Number getNumber(Object o) { /** Looks up generic method, with a dynamic cast to the receiver's type. (compound assignment) */ public static MethodHandle dynamicCast(MethodHandle target) { // adapt dynamic receiver cast to the generic method - MethodHandle cast = DYNAMIC_RECEIVER_CAST.asType(MethodType.methodType(target.type().returnType(), + MethodHandle cast = DYNAMIC_RECEIVER_CAST.asType(MethodType.methodType(target.type().returnType(), target.type().returnType(), target.type().parameterType(0))); // drop the RHS parameter @@ -1212,7 +1212,7 @@ public static MethodHandle dynamicCast(MethodHandle target) { // combine: f(x,y) -> g(f(x,y), x, y); return MethodHandles.foldArguments(cast, target); } - + /** Looks up generic method, with a dynamic cast to the specified type. (explicit assignment) */ public static MethodHandle dynamicCast(MethodHandle target, Class desired) { // adapt dynamic cast to the generic method @@ -1221,23 +1221,23 @@ public static MethodHandle dynamicCast(MethodHandle target, Class desired) { MethodHandle cast = DYNAMIC_CAST.bindTo(desired); return MethodHandles.filterReturnValue(target, cast); } - + /** Forces a cast to class A for target (only if types differ) */ public static MethodHandle cast(Class classA, MethodHandle target) { MethodType newType = MethodType.methodType(classA).unwrap(); MethodType targetType = MethodType.methodType(target.type().returnType()).unwrap(); - + // don't do a conversion if types are the same. explicitCastArguments has this opto, // but we do it explicitly, to make the boolean check simpler if (newType.returnType() == targetType.returnType()) { return target; } - + // we don't allow the to/from boolean conversions of explicitCastArguments if (newType.returnType() == boolean.class || targetType.returnType() == boolean.class) { throw new ClassCastException("Cannot cast " + targetType.returnType() + " to " + newType.returnType()); } - + // null return values are not possible for our arguments. 
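        // (Editorial note, not part of this patch.) explicitCastArguments
        // performs the primitive conversion itself: adapting a handle whose
        // return type is int with cast(long.class, handle) widens the int
        // result to long on every return. Boolean conversions were rejected
        // just above because explicitCastArguments would otherwise accept
        // them, treating boolean as a 0/1 bit.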
return MethodHandles.explicitCastArguments(target, target.type().changeReturnType(newType.returnType())); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 75575d6f12568..25145a44b5853 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -21,6 +21,7 @@ import org.elasticsearch.painless.spi.Whitelist; import org.objectweb.asm.Opcodes; +import org.objectweb.asm.Type; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -48,35 +49,6 @@ public final class Definition { private static final Pattern TYPE_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); - /** Some native types as constants: */ - public final Type voidType; - public final Type booleanType; - public final Type BooleanType; - public final Type byteType; - public final Type ByteType; - public final Type shortType; - public final Type ShortType; - public final Type intType; - public final Type IntegerType; - public final Type longType; - public final Type LongType; - public final Type floatType; - public final Type FloatType; - public final Type doubleType; - public final Type DoubleType; - public final Type charType; - public final Type CharacterType; - public final Type ObjectType; - public final Type DefType; - public final Type NumberType; - public final Type StringType; - public final Type ExceptionType; - public final Type PatternType; - public final Type MatcherType; - public final Type IteratorType; - public final Type ArrayListType; - public final Type HashMapType; - /** Marker class for def type to be used during type analysis. */ public static final class def { private def() { @@ -84,53 +56,6 @@ private def() { } } - public static final class Type { - public final String name; - public final int dimensions; - public final boolean dynamic; - public final Struct struct; - public final Class clazz; - public final org.objectweb.asm.Type type; - - private Type(final String name, final int dimensions, final boolean dynamic, - final Struct struct, final Class clazz, final org.objectweb.asm.Type type) { - this.name = name; - this.dimensions = dimensions; - this.dynamic = dynamic; - this.struct = struct; - this.clazz = clazz; - this.type = type; - } - - @Override - public boolean equals(final Object object) { - if (this == object) { - return true; - } - - if (object == null || getClass() != object.getClass()) { - return false; - } - - final Type type = (Type)object; - - return this.type.equals(type.type) && struct.equals(type.struct); - } - - @Override - public int hashCode() { - int result = struct.hashCode(); - result = 31 * result + type.hashCode(); - - return result; - } - - @Override - public String toString() { - return name; - } - } - public static class Method { public final String name; public final Struct owner; @@ -431,21 +356,6 @@ private Cast(Class from, Class to, boolean explicit, Class unboxFrom, C } } - /** Returns whether or not a non-array type exists. */ - public boolean isSimpleType(final String name) { - return structsMap.containsKey(name); - } - - /** Gets the type given by its name */ - public Type getType(final String name) { - return getTypeInternal(name); - } - - /** Creates an array type from the given Struct. 
*/ - public Type getType(final Struct struct, final int dimensions) { - return getTypeInternal(struct, dimensions); - } - public static Class getBoxedType(Class clazz) { if (clazz == boolean.class) { return Boolean.class; @@ -502,6 +412,10 @@ public static boolean isConstantType(Class clazz) { clazz == String.class; } + public Class getClassFromBinaryName(String painlessType) { + return painlessTypesToJavaClasses.get(painlessType.replace('$', '.')); + } + public static Class ObjectClassTodefClass(Class clazz) { if (clazz.isArray()) { Class component = clazz.getComponentType(); @@ -590,53 +504,6 @@ public static String ClassToName(Class clazz) { return clazz.getCanonicalName().replace('$', '.'); } - public Type ClassToType(Class clazz) { - if (clazz == null) { - return null; - } else if (clazz.isArray()) { - Class component = clazz.getComponentType(); - int dimensions = 1; - - while (component.isArray()) { - component = component.getComponentType(); - ++dimensions; - } - - if (component == def.class) { - return getType(structsMap.get(def.class.getSimpleName()), dimensions); - } else { - return getType(structsMap.get(ClassToName(component)), dimensions); - } - } else if (clazz == def.class) { - return getType(structsMap.get(def.class.getSimpleName()), 0); - } - - return getType(structsMap.get(ClassToName(clazz)), 0); - } - - public Struct RuntimeClassToStruct(Class clazz) { - return structsMap.get(ClassToName(clazz)); - } - - public static Class TypeToClass(Type type) { - if (def.class.getSimpleName().equals(type.struct.name)) { - return ObjectClassTodefClass(type.clazz); - } - - return type.clazz; - } - - public Class getClassFromBinaryName(String name) { - Struct struct = structsMap.get(name.replace('$', '.')); - - return struct == null ? null : struct.clazz; - } - - /** Collection of all simple types. Used by {@code PainlessDocGenerator} to generate an API reference. 
*/ - Collection allSimpleTypes() { - return simpleTypesMap.values(); - } - private static String buildMethodCacheKey(String structName, String methodName, List> arguments) { StringBuilder key = new StringBuilder(); key.append(structName); @@ -653,21 +520,21 @@ private static String buildFieldCacheKey(String structName, String fieldName, St return structName + fieldName + typeName; } - // INTERNAL IMPLEMENTATION: + public Collection getStructs() { + return javaClassesToPainlessStructs.values(); + } - private final Map structsMap; - private final Map simpleTypesMap; + private final Map> painlessTypesToJavaClasses; + private final Map, Struct> javaClassesToPainlessStructs; public Definition(List whitelists) { - structsMap = new HashMap<>(); - simpleTypesMap = new HashMap<>(); + painlessTypesToJavaClasses = new HashMap<>(); + javaClassesToPainlessStructs = new HashMap<>(); - Map, Struct> javaClassesToPainlessStructs = new HashMap<>(); String origin = null; - // add the universal def type - structsMap.put(def.class.getSimpleName(), - new Struct(def.class.getSimpleName(), Object.class, org.objectweb.asm.Type.getType(Object.class))); + painlessTypesToJavaClasses.put("def", def.class); + javaClassesToPainlessStructs.put(def.class, new Struct("def", Object.class, Type.getType(Object.class))); try { // first iteration collects all the Painless type names that @@ -675,7 +542,7 @@ public Definition(List whitelists) { for (Whitelist whitelist : whitelists) { for (Whitelist.Struct whitelistStruct : whitelist.whitelistStructs) { String painlessTypeName = whitelistStruct.javaClassName.replace('$', '.'); - Struct painlessStruct = structsMap.get(painlessTypeName); + Struct painlessStruct = javaClassesToPainlessStructs.get(painlessTypesToJavaClasses.get(painlessTypeName)); if (painlessStruct != null && painlessStruct.clazz.getName().equals(whitelistStruct.javaClassName) == false) { throw new IllegalArgumentException("struct [" + painlessStruct.name + "] cannot represent multiple classes " + @@ -685,7 +552,7 @@ public Definition(List whitelists) { origin = whitelistStruct.origin; addStruct(whitelist.javaClassLoader, whitelistStruct); - painlessStruct = structsMap.get(painlessTypeName); + painlessStruct = javaClassesToPainlessStructs.get(painlessTypesToJavaClasses.get(painlessTypeName)); javaClassesToPainlessStructs.put(painlessStruct.clazz, painlessStruct); } } @@ -719,13 +586,8 @@ public Definition(List whitelists) { // goes through each Painless struct and determines the inheritance list, // and then adds all inherited types to the Painless struct's whitelist - for (Map.Entry painlessNameStructEntry : structsMap.entrySet()) { - String painlessStructName = painlessNameStructEntry.getKey(); - Struct painlessStruct = painlessNameStructEntry.getValue(); - - if (painlessStruct.name.equals(painlessStructName) == false) { - continue; - } + for (Class javaClass : javaClassesToPainlessStructs.keySet()) { + Struct painlessStruct = javaClassesToPainlessStructs.get(javaClass); List painlessSuperStructs = new ArrayList<>(); Class javaSuperClass = painlessStruct.clazz.getSuperclass(); @@ -782,52 +644,14 @@ public Definition(List whitelists) { } // precompute runtime classes - for (String painlessStructName : structsMap.keySet()) { - Struct painlessStruct = structsMap.get(painlessStructName); - - if (painlessStruct.name.equals(painlessStructName) == false) { - continue; - } - + for (Struct painlessStruct : javaClassesToPainlessStructs.values()) { addRuntimeClass(painlessStruct); } // copy all structs to make them 
unmodifiable for outside users: - for (Map.Entry entry : structsMap.entrySet()) { - if (entry.getKey().equals(entry.getValue().name) == false) { - continue; - } - + for (Map.Entry,Struct> entry : javaClassesToPainlessStructs.entrySet()) { entry.setValue(entry.getValue().freeze(computeFunctionalInterfaceMethod(entry.getValue()))); } - - voidType = getType("void"); - booleanType = getType("boolean"); - BooleanType = getType("Boolean"); - byteType = getType("byte"); - ByteType = getType("Byte"); - shortType = getType("short"); - ShortType = getType("Short"); - intType = getType("int"); - IntegerType = getType("Integer"); - longType = getType("long"); - LongType = getType("Long"); - floatType = getType("float"); - FloatType = getType("Float"); - doubleType = getType("double"); - DoubleType = getType("Double"); - charType = getType("char"); - CharacterType = getType("Character"); - ObjectType = getType("Object"); - DefType = getType(def.class.getSimpleName()); - NumberType = getType("Number"); - StringType = getType("String"); - ExceptionType = getType("Exception"); - PatternType = getType("Pattern"); - MatcherType = getType("Matcher"); - IteratorType = getType("Iterator"); - ArrayListType = getType("ArrayList"); - HashMapType = getType("HashMap"); } private void addStruct(ClassLoader whitelistClassLoader, Whitelist.Struct whitelistStruct) { @@ -864,35 +688,45 @@ private void addStruct(ClassLoader whitelistClassLoader, Whitelist.Struct whitel } } - Struct existingStruct = structsMap.get(painlessTypeName); + Struct existingStruct = javaClassesToPainlessStructs.get(javaClass); if (existingStruct == null) { Struct struct = new Struct(painlessTypeName, javaClass, org.objectweb.asm.Type.getType(javaClass)); - structsMap.put(painlessTypeName, struct); - - if (whitelistStruct.onlyFQNJavaClassName) { - simpleTypesMap.put(painlessTypeName, getType(painlessTypeName)); - } else if (simpleTypesMap.containsKey(importedPainlessTypeName) == false) { - simpleTypesMap.put(importedPainlessTypeName, getType(painlessTypeName)); - structsMap.put(importedPainlessTypeName, struct); - } else { - throw new IllegalArgumentException("duplicate short name [" + importedPainlessTypeName + "] " + - "found for struct [" + painlessTypeName + "]"); - } + painlessTypesToJavaClasses.put(painlessTypeName, javaClass); + javaClassesToPainlessStructs.put(javaClass, struct); } else if (existingStruct.clazz.equals(javaClass) == false) { throw new IllegalArgumentException("struct [" + painlessTypeName + "] is used to " + "illegally represent multiple java classes [" + whitelistStruct.javaClassName + "] and " + "[" + existingStruct.clazz.getName() + "]"); - } else if (whitelistStruct.onlyFQNJavaClassName && simpleTypesMap.containsKey(importedPainlessTypeName) && - simpleTypesMap.get(importedPainlessTypeName).clazz == javaClass || - whitelistStruct.onlyFQNJavaClassName == false && (simpleTypesMap.containsKey(importedPainlessTypeName) == false || - simpleTypesMap.get(importedPainlessTypeName).clazz != javaClass)) { - throw new IllegalArgumentException("inconsistent only_fqn parameters found for type [" + painlessTypeName + "]"); + } + + if (painlessTypeName.equals(importedPainlessTypeName)) { + if (whitelistStruct.onlyFQNJavaClassName == false) { + throw new IllegalArgumentException("must use only_fqn parameter on type [" + painlessTypeName + "] with no package"); + } + } else { + Class importedJavaClass = painlessTypesToJavaClasses.get(importedPainlessTypeName); + + if (importedJavaClass == null) { + if 
(whitelistStruct.onlyFQNJavaClassName == false) { + if (existingStruct != null) { + throw new IllegalArgumentException("inconsistent only_fqn parameters found for type [" + painlessTypeName + "]"); + } + + painlessTypesToJavaClasses.put(importedPainlessTypeName, javaClass); + } + } else if (importedJavaClass.equals(javaClass) == false) { + throw new IllegalArgumentException("imported name [" + painlessTypeName + "] is used to " + + "illegally represent multiple java classes [" + whitelistStruct.javaClassName + "] " + + "and [" + importedJavaClass.getName() + "]"); + } else if (whitelistStruct.onlyFQNJavaClassName) { + throw new IllegalArgumentException("inconsistent only_fqn parameters found for type [" + painlessTypeName + "]"); + } } } private void addConstructor(String ownerStructName, Whitelist.Constructor whitelistConstructor) { - Struct ownerStruct = structsMap.get(ownerStructName); + Struct ownerStruct = javaClassesToPainlessStructs.get(painlessTypesToJavaClasses.get(ownerStructName)); if (ownerStruct == null) { throw new IllegalArgumentException("owner struct [" + ownerStructName + "] not defined for constructor with " + @@ -906,7 +740,7 @@ private void addConstructor(String ownerStructName, Whitelist.Constructor whitel String painlessParameterTypeName = whitelistConstructor.painlessParameterTypeNames.get(parameterCount); try { - Class painlessParameterClass = TypeToClass(getTypeInternal(painlessParameterTypeName)); + Class painlessParameterClass = getJavaClassFromPainlessType(painlessParameterTypeName); painlessParametersTypes.add(painlessParameterClass); javaClassParameters[parameterCount] = defClassToObjectClass(painlessParameterClass); @@ -952,7 +786,7 @@ private void addConstructor(String ownerStructName, Whitelist.Constructor whitel } private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, Whitelist.Method whitelistMethod) { - Struct ownerStruct = structsMap.get(ownerStructName); + Struct ownerStruct = javaClassesToPainlessStructs.get(painlessTypesToJavaClasses.get(ownerStructName)); if (ownerStruct == null) { throw new IllegalArgumentException("owner struct [" + ownerStructName + "] not defined for method with " + @@ -991,7 +825,7 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, String painlessParameterTypeName = whitelistMethod.painlessParameterTypeNames.get(parameterCount); try { - Class painlessParameterClass = TypeToClass(getTypeInternal(painlessParameterTypeName)); + Class painlessParameterClass = getJavaClassFromPainlessType(painlessParameterTypeName); painlessParametersTypes.add(painlessParameterClass); javaClassParameters[parameterCount + augmentedOffset] = defClassToObjectClass(painlessParameterClass); @@ -1016,7 +850,7 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, Class painlessReturnClass; try { - painlessReturnClass = TypeToClass(getTypeInternal(whitelistMethod.painlessReturnTypeName)); + painlessReturnClass = getJavaClassFromPainlessType(whitelistMethod.painlessReturnTypeName); } catch (IllegalArgumentException iae) { throw new IllegalArgumentException("struct not defined for return type [" + whitelistMethod.painlessReturnTypeName + "] " + "with owner struct [" + ownerStructName + "] and method with name [" + whitelistMethod.javaMethodName + "] " + @@ -1088,7 +922,7 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, } private void addField(String ownerStructName, Whitelist.Field whitelistField) { - Struct ownerStruct = 
structsMap.get(ownerStructName); + Struct ownerStruct = javaClassesToPainlessStructs.get(painlessTypesToJavaClasses.get(ownerStructName)); if (ownerStruct == null) { throw new IllegalArgumentException("owner struct [" + ownerStructName + "] not defined for method with " + @@ -1112,7 +946,7 @@ private void addField(String ownerStructName, Whitelist.Field whitelistField) { Class painlessFieldClass; try { - painlessFieldClass = TypeToClass(getTypeInternal(whitelistField.painlessFieldTypeName)); + painlessFieldClass = getJavaClassFromPainlessType(whitelistField.painlessFieldTypeName); } catch (IllegalArgumentException iae) { throw new IllegalArgumentException("struct not defined for return type [" + whitelistField.painlessFieldTypeName + "] " + "with owner struct [" + ownerStructName + "] and field with name [" + whitelistField.javaFieldName + "]", iae); @@ -1169,14 +1003,14 @@ private void addField(String ownerStructName, Whitelist.Field whitelistField) { } private void copyStruct(String struct, List children) { - final Struct owner = structsMap.get(struct); + final Struct owner = javaClassesToPainlessStructs.get(painlessTypesToJavaClasses.get(struct)); if (owner == null) { throw new IllegalArgumentException("Owner struct [" + struct + "] not defined for copy."); } for (int count = 0; count < children.size(); ++count) { - final Struct child = structsMap.get(children.get(count)); + final Struct child = javaClassesToPainlessStructs.get(painlessTypesToJavaClasses.get(children.get(count))); if (child == null) { throw new IllegalArgumentException("Child struct [" + children.get(count) + "]" + @@ -1340,71 +1174,68 @@ private Method computeFunctionalInterfaceMethod(Struct clazz) { return painless; } - private Type getTypeInternal(String name) { - // simple types (e.g. 0 array dimensions) are a simple hash lookup for speed - Type simple = simpleTypesMap.get(name); + public boolean isSimplePainlessType(String painlessType) { + return painlessTypesToJavaClasses.containsKey(painlessType); + } - if (simple != null) { - return simple; - } + public Struct getPainlessStructFromJavaClass(Class clazz) { + return javaClassesToPainlessStructs.get(clazz); + } - int dimensions = getDimensions(name); - String structstr = dimensions == 0 ? 
name : name.substring(0, name.indexOf('[')); - Struct struct = structsMap.get(structstr); + public Class getJavaClassFromPainlessType(String painlessType) { + Class javaClass = painlessTypesToJavaClasses.get(painlessType); - if (struct == null) { - throw new IllegalArgumentException("The struct with name [" + name + "] has not been defined."); + if (javaClass != null) { + return javaClass; } + int arrayDimensions = 0; + int arrayIndex = painlessType.indexOf('['); - return getTypeInternal(struct, dimensions); - } - - private Type getTypeInternal(Struct struct, int dimensions) { - String name = struct.name; - org.objectweb.asm.Type type = struct.type; - Class clazz = struct.clazz; + if (arrayIndex != -1) { + int length = painlessType.length(); - if (dimensions > 0) { - StringBuilder builder = new StringBuilder(name); - char[] brackets = new char[dimensions]; - - for (int count = 0; count < dimensions; ++count) { - builder.append("[]"); - brackets[count] = '['; + while (arrayIndex < length) { + if (painlessType.charAt(arrayIndex) == '[' && ++arrayIndex < length && painlessType.charAt(arrayIndex++) == ']') { + ++arrayDimensions; + } else { + throw new IllegalArgumentException("invalid painless type [" + painlessType + "]."); + } } - String descriptor = new String(brackets) + struct.type.getDescriptor(); - - name = builder.toString(); - type = org.objectweb.asm.Type.getType(descriptor); - - try { - clazz = Class.forName(type.getInternalName().replace('/', '.')); - } catch (ClassNotFoundException exception) { - throw new IllegalArgumentException("The class [" + type.getInternalName() + "]" + - " could not be found to create type [" + name + "]."); + painlessType = painlessType.substring(0, painlessType.indexOf('[')); + javaClass = painlessTypesToJavaClasses.get(painlessType); + + char braces[] = new char[arrayDimensions]; + Arrays.fill(braces, '['); + String descriptor = new String(braces); + + if (javaClass == boolean.class) { + descriptor += "Z"; + } else if (javaClass == byte.class) { + descriptor += "B"; + } else if (javaClass == short.class) { + descriptor += "S"; + } else if (javaClass == char.class) { + descriptor += "C"; + } else if (javaClass == int.class) { + descriptor += "I"; + } else if (javaClass == long.class) { + descriptor += "J"; + } else if (javaClass == float.class) { + descriptor += "F"; + } else if (javaClass == double.class) { + descriptor += "D"; + } else { + descriptor += "L" + javaClass.getName() + ";"; } - } - - return new Type(name, dimensions, def.class.getSimpleName().equals(name), struct, clazz, type); - } - private int getDimensions(String name) { - int dimensions = 0; - int index = name.indexOf('['); - - if (index != -1) { - int length = name.length(); - - while (index < length) { - if (name.charAt(index) == '[' && ++index < length && name.charAt(index++) == ']') { - ++dimensions; - } else { - throw new IllegalArgumentException("Invalid array braces in canonical name [" + name + "]."); - } + try { + return Class.forName(descriptor); + } catch (ClassNotFoundException cnfe) { + throw new IllegalStateException("invalid painless type [" + painlessType + "]", cnfe); } } - return dimensions; + throw new IllegalArgumentException("invalid painless type [" + painlessType + "]"); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java index 0b698dd244192..1b438965538ce 100644 --- 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java @@ -78,7 +78,7 @@ public class FunctionRef { * @param numCaptures number of captured arguments */ public FunctionRef(Definition definition, Class expected, String type, String call, int numCaptures) { - this(expected, definition.ClassToType(expected).struct.functionalMethod, + this(expected, definition.getPainlessStructFromJavaClass(expected).functionalMethod, lookup(definition, expected, type, call, numCaptures > 0), numCaptures); } @@ -162,14 +162,14 @@ private static Definition.Method lookup(Definition definition, Class expected String type, String call, boolean receiverCaptured) { // check its really a functional interface // for e.g. Comparable - Method method = definition.ClassToType(expected).struct.functionalMethod; + Method method = definition.getPainlessStructFromJavaClass(expected).functionalMethod; if (method == null) { throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " + "to [" + Definition.ClassToName(expected) + "], not a functional interface"); } // lookup requested method - Definition.Struct struct = definition.getType(type).struct; + Definition.Struct struct = definition.getPainlessStructFromJavaClass(definition.getJavaClassFromPainlessType(type)); final Definition.Method impl; // ctor ref if ("new".equals(call)) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java index 0b2fdf35890a0..7ae93eba22632 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java @@ -54,7 +54,7 @@ public Map> getHeaders(Definition definition) { if (objectToExplain != null) { toString = objectToExplain.toString(); javaClassName = objectToExplain.getClass().getName(); - Definition.Struct struct = definition.ClassToType(objectToExplain.getClass()).struct; + Definition.Struct struct = definition.getPainlessStructFromJavaClass(objectToExplain.getClass()); if (struct != null) { painlessClassName = struct.name; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 4ebcf8bfb82d2..833ff0eac4134 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -31,8 +31,8 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; -import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java index 60ce1d033532a..0ec806282db2f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java @@ -179,22 +179,18 @@ private MethodArgument methodArgument(Definition definition, Class clazz, Str private static Class definitionTypeForClass(Definition definition, Class type, Function, String> unknownErrorMessageSource) { - int dimensions = 0; + type = Definition.ObjectClassTodefClass(type); Class componentType = type; + while (componentType.isArray()) { - dimensions++; componentType = componentType.getComponentType(); } - Definition.Struct struct; - if (componentType == Object.class) { - struct = definition.getType("def").struct; - } else { - if (definition.RuntimeClassToStruct(componentType) == null) { - throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType)); - } - struct = definition.RuntimeClassToStruct(componentType); + + if (definition.getPainlessStructFromJavaClass(componentType) == null) { + throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType)); } - return Definition.TypeToClass(definition.getType(struct, dimensions)); + + return type; } private static String[] readArgumentNamesConstant(Class iface) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java index cf24a47386603..add3aaabe51e0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java @@ -74,8 +74,8 @@ public void recover(final LexerNoViableAltException lnvae) { } @Override - protected boolean isSimpleType(String name) { - return definition.isSimpleType(name); + protected boolean isType(String name) { + return definition.isSimplePainlessType(name); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java index dd62701b86e4d..7fa10f6e9fbf2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java @@ -1,9 +1,16 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; -import org.antlr.v4.runtime.Lexer; + import org.antlr.v4.runtime.CharStream; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.Lexer; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) @@ -14,16 +21,16 @@ abstract class PainlessLexer extends Lexer { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, - FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, - THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, 
MUL=30, DIV=31, REM=32, - ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, - EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, - COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, - DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, - AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, - DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, ID=82, DOTINTEGER=83, DOTID=84; public static final int AFTER_DOT = 1; public static String[] modeNames = { @@ -31,39 +38,39 @@ abstract class PainlessLexer extends Lexer { }; public static final String[] ruleNames = { - "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT", - "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", - "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", - "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", - "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", - "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", "REF", "ARROW", - "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", - "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", - "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", + "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", "DOT", + "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", "FOR", + "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", "THIS", + "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", "SUB", "LSH", + "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", "NER", "BWAND", + "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", "REF", "ARROW", + "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", "AMUL", "ADIV", + "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", "OCTAL", "HEX", + "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; private static final String[] _LITERAL_NAMES = { - null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", - "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", - "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", - "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", - "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", - "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", - "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", - "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, + null, null, null, "'{'", 
"'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", + "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", + "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", + "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { - null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", - "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", - "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", - "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", - "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", - "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", - "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", + null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", + "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", + "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", + "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", + "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", + "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", + "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", + "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", + "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -106,7 +113,7 @@ public Vocabulary getVocabulary() { * See also * The lexer hack. 
*/ - protected abstract boolean isSimpleType(String name); + protected abstract boolean isType(String name); /** * Is the preceding {@code /} a the beginning of a regex (true) or a division @@ -164,7 +171,7 @@ private boolean REGEX_sempred(RuleContext _localctx, int predIndex) { private boolean TYPE_sempred(RuleContext _localctx, int predIndex) { switch (predIndex) { case 2: - return isSimpleType(getText()) ; + return isType(getText()) ; } return true; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index 9cd3334aa51da..bef57d22e9ea9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -1,9 +1,24 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; -import org.antlr.v4.runtime.atn.*; + +import org.antlr.v4.runtime.FailedPredicateException; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.tree.*; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; + import java.util.List; @SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) @@ -14,57 +29,57 @@ class PainlessParser extends Parser { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, - NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, - FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, - THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, - ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, - EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, - COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, - DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, - AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, - DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, 
ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, ID=82, DOTINTEGER=83, DOTID=84; public static final int - RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, - RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, - RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, - RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, - RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19, - RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, - RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, - RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30, + RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, + RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, + RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, + RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, + RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19, + RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, + RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, + RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30, RULE_lamtype = 31, RULE_funcref = 32; public static final String[] ruleNames = { - "source", "function", "parameters", "statement", "rstatement", "dstatement", - "trailer", "block", "empty", "initializer", "afterthought", "declaration", - "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", - "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", - "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", + "source", "function", "parameters", "statement", "rstatement", "dstatement", + "trailer", "block", "empty", "initializer", "afterthought", "declaration", + "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", + "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", + "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", "lambda", "lamtype", "funcref" }; private static final String[] _LITERAL_NAMES = { - null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", - "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", - "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", - "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", - "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", - "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", - "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", - "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, + null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", + "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", + "'->'", "'=~'", "'==~'", "'++'", 
"'--'", "'='", "'+='", "'-='", "'*='", + "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, null, null, null, null, null, "'true'", "'false'", "'null'" }; private static final String[] _SYMBOLIC_NAMES = { - null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", - "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", - "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", - "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", - "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", - "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", - "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", - "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", - "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", + null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", + "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", + "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", + "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", + "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", + "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", + "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", + "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", + "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" }; public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); @@ -162,7 +177,7 @@ public final SourceContext source() throws RecognitionException { setState(66); function(); } - } + } } setState(71); _errHandler.sync(this); @@ -178,7 +193,7 @@ public final SourceContext source() throws RecognitionException { setState(72); statement(); } - } + } } setState(77); _errHandler.sync(this); @@ -426,7 +441,7 @@ public RstatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_rstatement; } - + public RstatementContext() { } public void copyFrom(RstatementContext ctx) { super.copyFrom(ctx); @@ -805,7 +820,7 @@ public final RstatementContext rstatement() throws RecognitionException { match(TRY); setState(164); block(); - setState(166); + setState(166); _errHandler.sync(this); _alt = 1; do { @@ -821,7 +836,7 @@ public final RstatementContext rstatement() throws RecognitionException { default: throw new NoViableAltException(this); } - setState(168); + setState(168); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,12,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); @@ -845,7 +860,7 @@ public DstatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_dstatement; } - + public DstatementContext() { } public void copyFrom(DstatementContext ctx) { super.copyFrom(ctx); @@ -1148,7 +1163,7 @@ public final BlockContext block() throws RecognitionException { setState(194); statement(); } - } + } } setState(199); _errHandler.sync(this); @@ -1407,7 +1422,7 @@ public final DecltypeContext decltype() throws RecognitionException { setState(224); match(RBRACE); } - } + } } setState(229); _errHandler.sync(this); @@ -1532,7 +1547,7 @@ public 
ExpressionContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_expression; } - + public ExpressionContext() { } public void copyFrom(ExpressionContext ctx) { super.copyFrom(ctx); @@ -1943,7 +1958,7 @@ private ExpressionContext expression(int _p) throws RecognitionException { } break; } - } + } } setState(297); _errHandler.sync(this); @@ -1967,7 +1982,7 @@ public UnaryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_unary; } - + public UnaryContext() { } public void copyFrom(UnaryContext ctx) { super.copyFrom(ctx); @@ -2135,7 +2150,7 @@ public ChainContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_chain; } - + public ChainContext() { } public void copyFrom(ChainContext ctx) { super.copyFrom(ctx); @@ -2214,7 +2229,7 @@ public final ChainContext chain() throws RecognitionException { setState(314); postfix(); } - } + } } setState(319); _errHandler.sync(this); @@ -2240,7 +2255,7 @@ public final ChainContext chain() throws RecognitionException { setState(322); postfix(); } - } + } } setState(327); _errHandler.sync(this); @@ -2274,7 +2289,7 @@ public PrimaryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_primary; } - + public PrimaryContext() { } public void copyFrom(PrimaryContext ctx) { super.copyFrom(ctx); @@ -2799,7 +2814,7 @@ public ArrayinitializerContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_arrayinitializer; } - + public ArrayinitializerContext() { } public void copyFrom(ArrayinitializerContext ctx) { super.copyFrom(ctx); @@ -2886,7 +2901,7 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept match(NEW); setState(372); match(TYPE); - setState(377); + setState(377); _errHandler.sync(this); _alt = 1; do { @@ -2906,7 +2921,7 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept default: throw new NoViableAltException(this); } - setState(379); + setState(379); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,31,_ctx); } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); @@ -2927,7 +2942,7 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept setState(382); postfix(); } - } + } } setState(387); _errHandler.sync(this); @@ -2989,7 +3004,7 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept setState(406); postfix(); } - } + } } setState(411); _errHandler.sync(this); @@ -3542,7 +3557,7 @@ public FuncrefContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } @Override public int getRuleIndex() { return RULE_funcref; } - + public FuncrefContext() { } public void copyFrom(FuncrefContext ctx) { super.copyFrom(ctx); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 3ac6cb7fd37c4..a481c99a99d12 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -986,19 +986,20 @@ public AExpression 
visitBraceaccess(BraceaccessContext ctx, AExpression prefix) @Override public ANode visitNewstandardarray(NewstandardarrayContext ctx) { - String type = ctx.TYPE().getText(); + StringBuilder type = new StringBuilder(ctx.TYPE().getText()); List expressions = new ArrayList<>(); for (ExpressionContext expression : ctx.expression()) { + type.append("[]"); expressions.add((AExpression)visit(expression)); } - return buildPostfixChain(new ENewArray(location(ctx), type, expressions, false), ctx.postdot(), ctx.postfix()); + return buildPostfixChain(new ENewArray(location(ctx), type.toString(), expressions, false), ctx.postdot(), ctx.postfix()); } @Override public ANode visitNewinitializedarray(NewinitializedarrayContext ctx) { - String type = ctx.TYPE().getText(); + String type = ctx.TYPE().getText() + "[]"; List expressions = new ArrayList<>(); for (ExpressionContext expression : ctx.expression()) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java index eaf8045bf1c65..5a897e04a8d98 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java @@ -19,7 +19,6 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; @@ -51,7 +50,7 @@ void extractVariables(Set variables) { @Override void analyze(Locals locals) { try { - actual = Definition.TypeToClass(locals.getDefinition().getType(type)); + actual = locals.getDefinition().getJavaClassFromPainlessType(type); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Not a type [" + type + "].")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java index c82b1003a55f1..21bef9aa2ed5d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java @@ -66,7 +66,7 @@ void analyze(Locals locals) { try { if ("this".equals(type)) { // user's own function - Method interfaceMethod = locals.getDefinition().ClassToType(expected).struct.functionalMethod; + Method interfaceMethod = locals.getDefinition().getPainlessStructFromJavaClass(expected).functionalMethod; if (interfaceMethod == null) { throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " + "to [" + Definition.ClassToName(expected) + "], not a functional interface"); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java index 54403b51f04bd..5296d79e214ed 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java @@ -58,7 +58,7 @@ void analyze(Locals locals) { // ensure the specified type is part of the definition try { - clazz = Definition.TypeToClass(locals.getDefinition().getType(this.type)); + clazz = locals.getDefinition().getJavaClassFromPainlessType(this.type); } catch (IllegalArgumentException 
exception) { throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java index a7213e75ca485..e40d21ab110ab 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java @@ -120,7 +120,7 @@ void analyze(Locals locals) { } } else { // we know the method statically, infer return type and any unknown/def types - interfaceMethod = locals.getDefinition().ClassToType(expected).struct.functionalMethod; + interfaceMethod = locals.getDefinition().getPainlessStructFromJavaClass(expected).functionalMethod; if (interfaceMethod == null) { throw createError(new IllegalArgumentException("Cannot pass lambda to [" + Definition.ClassToName(expected) + "], not a functional interface")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java index d957be0aadb50..05b10796cb4f9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java @@ -61,13 +61,13 @@ void analyze(Locals locals) { actual = ArrayList.class; - constructor = locals.getDefinition().ClassToType(actual).struct.constructors.get(new MethodKey("", 0)); + constructor = locals.getDefinition().getPainlessStructFromJavaClass(actual).constructors.get(new MethodKey("", 0)); if (constructor == null) { throw createError(new IllegalStateException("Illegal tree structure.")); } - method = locals.getDefinition().ClassToType(actual).struct.methods.get(new MethodKey("add", 1)); + method = locals.getDefinition().getPainlessStructFromJavaClass(actual).methods.get(new MethodKey("add", 1)); if (method == null) { throw createError(new IllegalStateException("Illegal tree structure.")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java index 2cd864da24b65..f5763042b8191 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java @@ -67,13 +67,13 @@ void analyze(Locals locals) { actual = HashMap.class; - constructor = locals.getDefinition().ClassToType(actual).struct.constructors.get(new MethodKey("", 0)); + constructor = locals.getDefinition().getPainlessStructFromJavaClass(actual).constructors.get(new MethodKey("", 0)); if (constructor == null) { throw createError(new IllegalStateException("Illegal tree structure.")); } - method = locals.getDefinition().ClassToType(actual).struct.methods.get(new MethodKey("put", 2)); + method = locals.getDefinition().getPainlessStructFromJavaClass(actual).methods.get(new MethodKey("put", 2)); if (method == null) { throw createError(new IllegalStateException("Illegal tree structure.")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArray.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArray.java index c1d58cb2f2ad9..1a0a718ae7fc8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArray.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArray.java @@ -19,7 +19,6 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; @@ -38,8 +37,6 @@ public final class ENewArray extends AExpression { private final List arguments; private final boolean initialize; - private Class array; - public ENewArray(Location location, String type, List arguments, boolean initialize) { super(location); @@ -64,7 +61,7 @@ void analyze(Locals locals) { Class clazz; try { - clazz = Definition.TypeToClass(locals.getDefinition().getType(this.type)); + clazz = locals.getDefinition().getJavaClassFromPainlessType(this.type); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); } @@ -72,15 +69,13 @@ void analyze(Locals locals) { for (int argument = 0; argument < arguments.size(); ++argument) { AExpression expression = arguments.get(argument); - expression.expected = initialize ? clazz : int.class; + expression.expected = initialize ? clazz.getComponentType() : int.class; expression.internal = true; expression.analyze(locals); arguments.set(argument, expression.cast(locals)); } - actual = Definition.TypeToClass(locals.getDefinition().getType( - locals.getDefinition().ClassToType(clazz).struct, initialize ? 1 : arguments.size())); - array = Definition.defClassToObjectClass(actual); + actual = clazz; } @Override @@ -89,7 +84,7 @@ void write(MethodWriter writer, Globals globals) { if (initialize) { writer.push(arguments.size()); - writer.newArray(MethodWriter.getType(array.getComponentType())); + writer.newArray(MethodWriter.getType(actual.getComponentType())); for (int index = 0; index < arguments.size(); ++index) { AExpression argument = arguments.get(index); @@ -97,7 +92,7 @@ void write(MethodWriter writer, Globals globals) { writer.dup(); writer.push(index); argument.write(writer, globals); - writer.arrayStore(MethodWriter.getType(array.getComponentType())); + writer.arrayStore(MethodWriter.getType(actual.getComponentType())); } } else { for (AExpression argument : arguments) { @@ -105,9 +100,9 @@ void write(MethodWriter writer, Globals globals) { } if (arguments.size() > 1) { - writer.visitMultiANewArrayInsn(MethodWriter.getType(array).getDescriptor(), arguments.size()); + writer.visitMultiANewArrayInsn(MethodWriter.getType(actual).getDescriptor(), arguments.size()); } else { - writer.newArray(MethodWriter.getType(array.getComponentType())); + writer.newArray(MethodWriter.getType(actual.getComponentType())); } } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java index 2a96d68bcb417..e3a926ef2244b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java @@ -58,12 +58,12 @@ void extractVariables(Set variables) { @Override void analyze(Locals locals) { try { - actual = Definition.TypeToClass(locals.getDefinition().getType(this.type)); + actual = locals.getDefinition().getJavaClassFromPainlessType(this.type); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); } - Struct struct = 
locals.getDefinition().ClassToType(actual).struct; + Struct struct = locals.getDefinition().getPainlessStructFromJavaClass(actual); constructor = struct.constructors.get(new Definition.MethodKey("", arguments.size())); if (constructor != null) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java index 5c3b4cadf6ee9..fa249b9df6237 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java @@ -72,7 +72,7 @@ void analyze(Locals locals) { } constant = new Constant( - location, locals.getDefinition().PatternType.type, "regexAt$" + location.getOffset(), this::initializeConstant); + location, MethodWriter.getType(Pattern.class), "regexAt$" + location.getOffset(), this::initializeConstant); actual = Pattern.class; } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EStatic.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EStatic.java index f5c2c6e9da354..5ebf30f5781cf 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EStatic.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EStatic.java @@ -19,7 +19,6 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; @@ -49,7 +48,7 @@ void extractVariables(Set variables) { @Override void analyze(Locals locals) { try { - actual = Definition.TypeToClass(locals.getDefinition().getType(type)); + actual = locals.getDefinition().getJavaClassFromPainlessType(type); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Not a type [" + type + "].")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java index 6712eccd914c5..0e2ab70897fe5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PBrace.java @@ -63,9 +63,9 @@ void analyze(Locals locals) { } else if (prefix.actual == def.class) { sub = new PSubDefArray(location, index); } else if (Map.class.isAssignableFrom(prefix.actual)) { - sub = new PSubMapShortcut(location, locals.getDefinition().ClassToType(prefix.actual).struct, index); + sub = new PSubMapShortcut(location, locals.getDefinition().getPainlessStructFromJavaClass(prefix.actual), index); } else if (List.class.isAssignableFrom(prefix.actual)) { - sub = new PSubListShortcut(location, locals.getDefinition().ClassToType(prefix.actual).struct, index); + sub = new PSubListShortcut(location, locals.getDefinition().getPainlessStructFromJavaClass(prefix.actual), index); } else { throw createError( new IllegalArgumentException("Illegal array access on type [" + Definition.ClassToName(prefix.actual) + "].")); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java index 498fb83239395..6fff5a8e93f3e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java @@ -71,10 +71,10 @@ void analyze(Locals locals) { throw createError(new IllegalArgumentException("Illegal call [" + name + "] on array type.")); } - Struct struct = locals.getDefinition().ClassToType(prefix.actual).struct; + Struct struct = locals.getDefinition().getPainlessStructFromJavaClass(prefix.actual); if (prefix.actual.isPrimitive()) { - struct = locals.getDefinition().ClassToType(Definition.getBoxedType(prefix.actual)).struct; + struct = locals.getDefinition().getPainlessStructFromJavaClass(Definition.getBoxedType(prefix.actual)); } MethodKey methodKey = new MethodKey(name, arguments.size()); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java index 1f492758af618..de2c05dfa9b28 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java @@ -67,7 +67,7 @@ void analyze(Locals locals) { } else if (prefix.actual == def.class) { sub = new PSubDefField(location, value); } else { - Struct struct = locals.getDefinition().ClassToType(prefix.actual).struct; + Struct struct = locals.getDefinition().getPainlessStructFromJavaClass(prefix.actual); Field field = prefix instanceof EStatic ? struct.staticMembers.get(value) : struct.members.get(value); if (field != null) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java index d98c2f2276eaa..6428e47d1bacc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubDefCall.java @@ -20,7 +20,6 @@ package org.elasticsearch.painless.node; import org.elasticsearch.painless.DefBootstrap; - import org.elasticsearch.painless.Definition.def; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java index 535ad5235b07c..98e45ca29f416 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java @@ -19,7 +19,6 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.Variable; @@ -68,7 +67,7 @@ void analyze(Locals locals) { Class clazz; try { - clazz = Definition.TypeToClass(locals.getDefinition().getType(this.type)); + clazz = locals.getDefinition().getJavaClassFromPainlessType(this.type); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java index f00db583ceae4..9f3f86abf438b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java @@ -19,7 +19,6 @@ package org.elasticsearch.painless.node; -import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.Variable; @@ -63,7 +62,7 @@ void analyze(Locals locals) { Class clazz; try { - clazz = Definition.TypeToClass(locals.getDefinition().getType(this.type)); + clazz = locals.getDefinition().getJavaClassFromPainlessType(this.type); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java index 04de0c0696e96..a3c8319825a26 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java @@ -71,7 +71,7 @@ void analyze(Locals locals) { Class clazz; try { - clazz = Definition.TypeToClass(locals.getDefinition().getType(this.type)); + clazz = locals.getDefinition().getJavaClassFromPainlessType(this.type); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Not a type [" + this.type + "].")); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java index 5fa62f27e94dc..1b1e6bd2ef84b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java @@ -119,7 +119,7 @@ void extractVariables(Set variables) { void generateSignature(Definition definition) { try { - rtnType = Definition.TypeToClass(definition.getType(rtnTypeStr)); + rtnType = definition.getJavaClassFromPainlessType(rtnTypeStr); } catch (IllegalArgumentException exception) { throw createError(new IllegalArgumentException("Illegal return type [" + rtnTypeStr + "] for function [" + name + "].")); } @@ -133,7 +133,7 @@ void generateSignature(Definition definition) { for (int param = 0; param < this.paramTypeStrs.size(); ++param) { try { - Class paramType = Definition.TypeToClass(definition.getType(this.paramTypeStrs.get(param))); + Class paramType = definition.getJavaClassFromPainlessType(this.paramTypeStrs.get(param)); paramClasses[param] = Definition.defClassToObjectClass(paramType); paramTypes.add(paramType); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java index ca30d641e7468..11e0f15d7e4f8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java @@ -25,7 +25,6 @@ import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.MethodKey; -import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.painless.Definition.def; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; @@ -78,12 +77,11 @@ void analyze(Locals locals) { if (expression.actual == def.class) { method = null; } 
else { - Type actualType = locals.getDefinition().ClassToType(expression.actual); - method = actualType.struct.methods.get(new MethodKey("iterator", 0)); + method = locals.getDefinition().getPainlessStructFromJavaClass(expression.actual).methods.get(new MethodKey("iterator", 0)); if (method == null) { throw createError(new IllegalArgumentException( - "Unable to create iterator for the type [" + actualType.name + "].")); + "Unable to create iterator for the type [" + Definition.ClassToName(expression.actual) + "].")); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index 8fd96d67d5b53..52528c358fc82 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -1,5 +1,3 @@ -package org.elasticsearch.painless; - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -19,6 +17,8 @@ * under the License. */ +package org.elasticsearch.painless; + import java.lang.invoke.CallSite; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index ed38f4c511f59..309b6be97f20b 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -26,9 +26,6 @@ import org.elasticsearch.painless.Definition.Field; import org.elasticsearch.painless.Definition.Method; import org.elasticsearch.painless.Definition.Struct; -import org.elasticsearch.painless.Definition.Type; -import org.elasticsearch.painless.spi.Whitelist; - import java.io.IOException; import java.io.PrintStream; import java.lang.reflect.Modifier; @@ -44,12 +41,14 @@ import static java.util.Comparator.comparing; import static java.util.stream.Collectors.toList; +import static org.elasticsearch.painless.spi.Whitelist.BASE_WHITELISTS; /** * Generates an API reference from the method and type whitelists in {@link Definition}. 
*/ public class PainlessDocGenerator { - private static final Definition definition = new Definition(Whitelist.BASE_WHITELISTS); + + private static final Definition definition = new Definition(BASE_WHITELISTS); private static final Logger logger = ESLoggerFactory.getLogger(PainlessDocGenerator.class); private static final Comparator FIELD_NAME = comparing(f -> f.name); private static final Comparator METHOD_NAME = comparing(m -> m.name); @@ -68,41 +67,41 @@ public static void main(String[] args) throws IOException { Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8.name())) { emitGeneratedWarning(indexStream); - List types = definition.allSimpleTypes().stream().sorted(comparing(t -> t.name)).collect(toList()); - for (Type type : types) { - if (type.clazz.isPrimitive()) { + List structs = definition.getStructs().stream().sorted(comparing(t -> t.name)).collect(toList()); + for (Struct struct : structs) { + if (struct.clazz.isPrimitive()) { // Primitives don't have methods to reference continue; } - if ("def".equals(type.name)) { + if ("def".equals(struct.name)) { // def is special but doesn't have any methods all of its own. continue; } indexStream.print("include::"); - indexStream.print(type.struct.name); + indexStream.print(struct.name); indexStream.println(".asciidoc[]"); - Path typePath = apiRootPath.resolve(type.struct.name + ".asciidoc"); - logger.info("Writing [{}.asciidoc]", type.name); + Path typePath = apiRootPath.resolve(struct.name + ".asciidoc"); + logger.info("Writing [{}.asciidoc]", struct.name); try (PrintStream typeStream = new PrintStream( Files.newOutputStream(typePath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8.name())) { emitGeneratedWarning(typeStream); typeStream.print("[["); - emitAnchor(typeStream, type.struct); + emitAnchor(typeStream, struct); typeStream.print("]]++"); - typeStream.print(type.name); + typeStream.print(struct.name); typeStream.println("++::"); Consumer documentField = field -> PainlessDocGenerator.documentField(typeStream, field); Consumer documentMethod = method -> PainlessDocGenerator.documentMethod(typeStream, method); - type.struct.staticMembers.values().stream().sorted(FIELD_NAME).forEach(documentField); - type.struct.members.values().stream().sorted(FIELD_NAME).forEach(documentField); - type.struct.staticMethods.values().stream().sorted(METHOD_NAME.thenComparing(NUMBER_OF_ARGS)).forEach(documentMethod); - type.struct.constructors.values().stream().sorted(NUMBER_OF_ARGS).forEach(documentMethod); + struct.staticMembers.values().stream().sorted(FIELD_NAME).forEach(documentField); + struct.members.values().stream().sorted(FIELD_NAME).forEach(documentField); + struct.staticMethods.values().stream().sorted(METHOD_NAME.thenComparing(NUMBER_OF_ARGS)).forEach(documentMethod); + struct.constructors.values().stream().sorted(NUMBER_OF_ARGS).forEach(documentMethod); Map inherited = new TreeMap<>(); - type.struct.methods.values().stream().sorted(METHOD_NAME.thenComparing(NUMBER_OF_ARGS)).forEach(method -> { - if (method.owner == type.struct) { + struct.methods.values().stream().sorted(METHOD_NAME.thenComparing(NUMBER_OF_ARGS)).forEach(method -> { + if (method.owner == struct) { documentMethod(typeStream, method); } else { inherited.put(method.owner.name, method.owner); @@ -139,7 +138,7 @@ private static void documentField(PrintStream stream, Field field) { stream.print("static "); } - emitType(stream, definition.ClassToType(field.clazz)); + 
emitType(stream, field.clazz); stream.print(' '); String javadocRoot = javadocRoot(field); @@ -170,7 +169,7 @@ private static void documentMethod(PrintStream stream, Method method) { } if (false == method.name.equals("")) { - emitType(stream, definition.ClassToType(method.rtn)); + emitType(stream, method.rtn); stream.print(' '); } @@ -188,7 +187,7 @@ private static void documentMethod(PrintStream stream, Method method) { } else { stream.print(", "); } - emitType(stream, definition.ClassToType(arg)); + emitType(stream, arg); } stream.print(")++"); @@ -234,19 +233,19 @@ private static String methodName(Method method) { } /** - * Emit a {@link Type}. If the type is primitive or an array of primitives this just emits the name of the type. Otherwise this emits an - * internal link with the text. + * Emit a {@link Class}. If the type is primitive or an array of primitives this just emits the name of the type. Otherwise this emits + an internal link with the text. */ - private static void emitType(PrintStream stream, Type type) { - emitStruct(stream, type.struct); - for (int i = 0; i < type.dimensions; i++) { + private static void emitType(PrintStream stream, Class clazz) { + emitStruct(stream, definition.getPainlessStructFromJavaClass(clazz)); + while ((clazz = clazz.getComponentType()) != null) { stream.print("[]"); } } /** - * Emit a {@link Struct}. If the {@linkplain Struct} is primitive or def this just emits the name of the struct. Otherwise this emits an - * internal link with the name. + * Emit a {@link Struct}. If the {@linkplain Struct} is primitive or def this just emits the name of the struct. Otherwise this emits + * an internal link with the name. */ private static void emitStruct(PrintStream stream, Struct struct) { if (false == struct.clazz.isPrimitive() && false == struct.name.equals("def")) { @@ -279,14 +278,13 @@ private static void emitJavadocLink(PrintStream stream, String root, Method meth stream.print(method.owner.clazz.getName()); } for (Class clazz: method.arguments) { - Type arg = definition.ClassToType(clazz); if (first) { first = false; } else { stream.print("%2D"); } - stream.print(arg.struct.clazz.getName()); - if (arg.dimensions > 0) { + stream.print(clazz.getName()); + if (clazz.isArray()) { stream.print(":A"); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 6dbe480d4b5a3..fd8190aa2c2eb 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -284,12 +284,12 @@ public void testEMapInit() { } public void testENewArray() { - assertToString("(SSource (SReturn (ENewArray int dims (Args (ENumeric 10)))))", "return new int[10]"); - assertToString("(SSource (SReturn (ENewArray int dims (Args (ENumeric 10) (ENumeric 4) (ENumeric 5)))))", + assertToString("(SSource (SReturn (ENewArray int[] dims (Args (ENumeric 10)))))", "return new int[10]"); + assertToString("(SSource (SReturn (ENewArray int[][][] dims (Args (ENumeric 10) (ENumeric 4) (ENumeric 5)))))", "return new int[10][4][5]"); - assertToString("(SSource (SReturn (ENewArray int init (Args (ENumeric 1) (ENumeric 2) (ENumeric 3)))))", + assertToString("(SSource (SReturn (ENewArray int[] init (Args (ENumeric 1) (ENumeric 2) (ENumeric 3)))))", "return new int[] {1, 2, 3}"); - assertToString("(SSource (SReturn (ENewArray 
def init (Args (ENumeric 1) (ENumeric 2) (EString 'bird')))))", + assertToString("(SSource (SReturn (ENewArray def[] init (Args (ENumeric 1) (ENumeric 2) (EString 'bird')))))", "return new def[] {1, 2, 'bird'}"); } @@ -372,7 +372,7 @@ public void testPField() { assertToString("(SSource (SReturn (PField nullSafe (EVariable params) a)))", "return params?.a"); assertToString( "(SSource\n" - + " (SDeclBlock (SDeclaration int[] a (ENewArray int dims (Args (ENumeric 10)))))\n" + + " (SDeclBlock (SDeclaration int[] a (ENewArray int[] dims (Args (ENumeric 10)))))\n" + " (SReturn (PField (EVariable a) length)))", "int[] a = new int[10];\n" + "return a.length"); @@ -403,7 +403,7 @@ public void testPSubBrace() { public void testPSubCallInvoke() { Location l = new Location(getTestName(), 0); - Struct c = definition.ClassToType(Integer.class).struct; + Struct c = definition.getPainlessStructFromJavaClass(Integer.class); Method m = c.methods.get(new MethodKey("toString", 0)); PSubCallInvoke node = new PSubCallInvoke(l, m, null, emptyList()); node.prefix = new EVariable(l, "a"); @@ -458,7 +458,7 @@ public void testPSubDefField() { public void testPSubField() { Location l = new Location(getTestName(), 0); - Struct s = definition.getType(Boolean.class.getSimpleName()).struct; + Struct s = definition.getPainlessStructFromJavaClass(Boolean.class); Field f = s.staticMembers.get("TRUE"); PSubField node = new PSubField(l, f); node.prefix = new EStatic(l, "Boolean"); @@ -468,7 +468,7 @@ public void testPSubField() { public void testPSubListShortcut() { Location l = new Location(getTestName(), 0); - Struct s = definition.getType(List.class.getSimpleName()).struct; + Struct s = definition.getPainlessStructFromJavaClass(List.class); PSubListShortcut node = new PSubListShortcut(l, s, new EConstant(l, 1)); node.prefix = new EVariable(l, "a"); assertEquals("(PSubListShortcut (EVariable a) (EConstant Integer 1))", node.toString()); @@ -476,7 +476,7 @@ public void testPSubListShortcut() { new PSubNullSafeCallInvoke(l, node).toString()); l = new Location(getTestName(), 0); - s = definition.getType(List.class.getSimpleName()).struct; + s = definition.getPainlessStructFromJavaClass(List.class); node = new PSubListShortcut(l, s, new EBinary(l, Operation.ADD, new EConstant(l, 1), new EConstant(l, 4))); node.prefix = new EVariable(l, "a"); assertEquals("(PSubListShortcut (EVariable a) (EBinary (EConstant Integer 1) + (EConstant Integer 4)))", node.toString()); @@ -484,7 +484,7 @@ public void testPSubListShortcut() { public void testPSubMapShortcut() { Location l = new Location(getTestName(), 0); - Struct s = definition.getType(Map.class.getSimpleName()).struct; + Struct s = definition.getPainlessStructFromJavaClass(Map.class); PSubMapShortcut node = new PSubMapShortcut(l, s, new EConstant(l, "cat")); node.prefix = new EVariable(l, "a"); assertEquals("(PSubMapShortcut (EVariable a) (EConstant String 'cat'))", node.toString()); @@ -492,7 +492,7 @@ public void testPSubMapShortcut() { new PSubNullSafeCallInvoke(l, node).toString()); l = new Location(getTestName(), 1); - s = definition.getType(Map.class.getSimpleName()).struct; + s = definition.getPainlessStructFromJavaClass(Map.class); node = new PSubMapShortcut(l, s, new EBinary(l, Operation.ADD, new EConstant(l, 1), new EConstant(l, 4))); node.prefix = new EVariable(l, "a"); assertEquals("(PSubMapShortcut (EVariable a) (EBinary (EConstant Integer 1) + (EConstant Integer 4)))", node.toString()); @@ -500,7 +500,7 @@ public void testPSubMapShortcut() { public void 
testPSubShortcut() { Location l = new Location(getTestName(), 0); - Struct s = definition.getType(FeatureTest.class.getName()).struct; + Struct s = definition.getPainlessStructFromJavaClass(FeatureTest.class); Method getter = s.methods.get(new MethodKey("getX", 0)); Method setter = s.methods.get(new MethodKey("setX", 1)); PSubShortcut node = new PSubShortcut(l, "x", FeatureTest.class.getName(), getter, setter); From ac7fadd3367aa70ba256a1133942a76c9f6ab333 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 3 Jul 2018 13:40:37 -0700 Subject: [PATCH 28/36] [DOCS] Starting Elasticsearch (#31701) --- docs/reference/setup.asciidoc | 2 + .../reference/setup/install/deb-init.asciidoc | 20 ++++++ docs/reference/setup/install/deb.asciidoc | 23 +----- .../setup/install/init-systemd.asciidoc | 2 - .../setup/install/msi-windows-start.asciidoc | 16 +++++ .../reference/setup/install/rpm-init.asciidoc | 20 ++++++ docs/reference/setup/install/rpm.asciidoc | 24 +------ docs/reference/setup/install/windows.asciidoc | 18 +---- .../setup/install/zip-targz-daemon.asciidoc | 21 ++++++ .../setup/install/zip-targz-start.asciidoc | 17 +++++ .../setup/install/zip-targz.asciidoc | 39 +--------- .../setup/install/zip-windows-start.asciidoc | 11 +++ .../setup/install/zip-windows.asciidoc | 12 +--- docs/reference/setup/starting.asciidoc | 72 +++++++++++++++++++ 14 files changed, 189 insertions(+), 108 deletions(-) create mode 100644 docs/reference/setup/install/deb-init.asciidoc create mode 100644 docs/reference/setup/install/msi-windows-start.asciidoc create mode 100644 docs/reference/setup/install/rpm-init.asciidoc create mode 100644 docs/reference/setup/install/zip-targz-daemon.asciidoc create mode 100644 docs/reference/setup/install/zip-targz-start.asciidoc create mode 100644 docs/reference/setup/install/zip-windows-start.asciidoc create mode 100644 docs/reference/setup/starting.asciidoc diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index b8516bdc6cb5c..60e3c1dac2948 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -55,4 +55,6 @@ include::setup/sysconfig.asciidoc[] include::setup/bootstrap-checks.asciidoc[] +include::setup/starting.asciidoc[] + include::setup/stopping.asciidoc[] diff --git a/docs/reference/setup/install/deb-init.asciidoc b/docs/reference/setup/install/deb-init.asciidoc new file mode 100644 index 0000000000000..0e6e142a82927 --- /dev/null +++ b/docs/reference/setup/install/deb-init.asciidoc @@ -0,0 +1,20 @@ +==== Running Elasticsearch with SysV `init` + +Use the `update-rc.d` command to configure Elasticsearch to start automatically +when the system boots up: + +[source,sh] +-------------------------------------------------- +sudo update-rc.d elasticsearch defaults 95 10 +-------------------------------------------------- + +Elasticsearch can be started and stopped using the `service` command: + +[source,sh] +-------------------------------------------- +sudo -i service elasticsearch start +sudo -i service elasticsearch stop +-------------------------------------------- + +If Elasticsearch fails to start for any reason, it will print the reason for +failure to STDOUT. Log files can be found in `/var/log/elasticsearch/`. 
\ No newline at end of file diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index 2abacf947c7bc..629abe37afe62 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -143,29 +143,12 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] +==== SysV `init` vs `systemd` + include::init-systemd.asciidoc[] [[deb-running-init]] -==== Running Elasticsearch with SysV `init` - -Use the `update-rc.d` command to configure Elasticsearch to start automatically -when the system boots up: - -[source,sh] --------------------------------------------------- -sudo update-rc.d elasticsearch defaults 95 10 --------------------------------------------------- - -Elasticsearch can be started and stopped using the `service` command: - -[source,sh] --------------------------------------------- -sudo -i service elasticsearch start -sudo -i service elasticsearch stop --------------------------------------------- - -If Elasticsearch fails to start for any reason, it will print the reason for -failure to STDOUT. Log files can be found in `/var/log/elasticsearch/`. +include::deb-init.asciidoc[] [[deb-running-systemd]] include::systemd.asciidoc[] diff --git a/docs/reference/setup/install/init-systemd.asciidoc b/docs/reference/setup/install/init-systemd.asciidoc index 1532c5313aefd..144fe4c481275 100644 --- a/docs/reference/setup/install/init-systemd.asciidoc +++ b/docs/reference/setup/install/init-systemd.asciidoc @@ -1,5 +1,3 @@ -==== SysV `init` vs `systemd` - Elasticsearch is not started automatically after installation. How to start and stop Elasticsearch depends on whether your system uses SysV `init` or `systemd` (used by newer distributions). You can tell which is being used by diff --git a/docs/reference/setup/install/msi-windows-start.asciidoc b/docs/reference/setup/install/msi-windows-start.asciidoc new file mode 100644 index 0000000000000..28bcfed6af3de --- /dev/null +++ b/docs/reference/setup/install/msi-windows-start.asciidoc @@ -0,0 +1,16 @@ +==== Running Elasticsearch from the command line + +Once installed, Elasticsearch can be started from the command line, if not installed as a service +and configured to start when installation completes, as follows: + +["source","sh",subs="attributes,callouts"] +-------------------------------------------- +.\bin\elasticsearch.exe +-------------------------------------------- + +The command line terminal will display output similar to the following: + +image::images/msi_installer/elasticsearch_exe.png[] + +By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT` in addition +to the `.log` file within `LOGSDIRECTORY`, and can be stopped by pressing `Ctrl-C`. 
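A quick way to confirm the node actually came up after starting it from the command line is to query the HTTP root endpoint. This is a sketch assuming the default port of 9200 on localhost, not part of the change itself:

[source,sh]
--------------------------------------------
curl -X GET "localhost:9200/"
--------------------------------------------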
diff --git a/docs/reference/setup/install/rpm-init.asciidoc b/docs/reference/setup/install/rpm-init.asciidoc new file mode 100644 index 0000000000000..a3db166308caf --- /dev/null +++ b/docs/reference/setup/install/rpm-init.asciidoc @@ -0,0 +1,20 @@ +==== Running Elasticsearch with SysV `init` + +Use the `chkconfig` command to configure Elasticsearch to start automatically +when the system boots up: + +[source,sh] +-------------------------------------------------- +sudo chkconfig --add elasticsearch +-------------------------------------------------- + +Elasticsearch can be started and stopped using the `service` command: + +[source,sh] +-------------------------------------------- +sudo -i service elasticsearch start +sudo -i service elasticsearch stop +-------------------------------------------- + +If Elasticsearch fails to start for any reason, it will print the reason for +failure to STDOUT. Log files can be found in `/var/log/elasticsearch/`. diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index aad7cf5bf3b73..a6f106497e9d2 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -130,30 +130,12 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] +==== SysV `init` vs `systemd` + include::init-systemd.asciidoc[] [[rpm-running-init]] -==== Running Elasticsearch with SysV `init` - -Use the `chkconfig` command to configure Elasticsearch to start automatically -when the system boots up: - -[source,sh] --------------------------------------------------- -sudo chkconfig --add elasticsearch --------------------------------------------------- - -Elasticsearch can be started and stopped using the `service` command: - -[source,sh] --------------------------------------------- -sudo -i service elasticsearch start -sudo -i service elasticsearch stop --------------------------------------------- - -If Elasticsearch fails to start for any reason, it will print the reason for -failure to STDOUT. Log files can be found in `/var/log/elasticsearch/`. - +include::rpm-init.asciidoc[] [[rpm-running-systemd]] include::systemd.asciidoc[] diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 56bb953c18ebb..1535e5415e4a4 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -342,23 +342,7 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] [[msi-installer-command-line-running]] -==== Running Elasticsearch from the command line - -Once installed, Elasticsearch can be started from the command line, if not installed as a service -and configured to start when installation completes, as follows: - -["source","sh",subs="attributes,callouts"] --------------------------------------------- -.\bin\elasticsearch.exe --------------------------------------------- - -The command line terminal will display output similar to the following: - -[[msi-installer-elasticsearch-exe]] -image::images/msi_installer/elasticsearch_exe.png[] - -By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT` in addition -to the `.log` file within `LOGSDIRECTORY`, and can be stopped by pressing `Ctrl-C`. 
+include::msi-windows-start.asciidoc[] [[msi-installer-command-line-configuration]] ==== Configuring Elasticsearch on the command line diff --git a/docs/reference/setup/install/zip-targz-daemon.asciidoc b/docs/reference/setup/install/zip-targz-daemon.asciidoc new file mode 100644 index 0000000000000..31d9c3c2e7437 --- /dev/null +++ b/docs/reference/setup/install/zip-targz-daemon.asciidoc @@ -0,0 +1,21 @@ +==== Running as a daemon + +To run Elasticsearch as a daemon, specify `-d` on the command line, and record +the process ID in a file using the `-p` option: + +[source,sh] +-------------------------------------------- +./bin/elasticsearch -d -p pid +-------------------------------------------- + +Log messages can be found in the `$ES_HOME/logs/` directory. + +To shut down Elasticsearch, kill the process ID recorded in the `pid` file: + +[source,sh] +-------------------------------------------- +kill `cat pid` +-------------------------------------------- + +NOTE: The startup scripts provided in the <> and <> +packages take care of starting and stopping the Elasticsearch process for you. diff --git a/docs/reference/setup/install/zip-targz-start.asciidoc b/docs/reference/setup/install/zip-targz-start.asciidoc new file mode 100644 index 0000000000000..907b2a7317d79 --- /dev/null +++ b/docs/reference/setup/install/zip-targz-start.asciidoc @@ -0,0 +1,17 @@ +==== Running Elasticsearch from the command line + +Elasticsearch can be started from the command line as follows: + +[source,sh] +-------------------------------------------- +./bin/elasticsearch +-------------------------------------------- + +By default, Elasticsearch runs in the foreground, prints its logs to the +standard output (`stdout`), and can be stopped by pressing `Ctrl-C`. + +NOTE: All scripts packaged with Elasticsearch require a version of Bash +that supports arrays and assume that Bash is available at `/bin/bash`. +As such, Bash should be available at this path either directly or via a +symbolic link. + diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc index f44742c648e8e..735ca5b4ea0d1 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/zip-targz.asciidoc @@ -90,22 +90,7 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] [[zip-targz-running]] -==== Running Elasticsearch from the command line - -Elasticsearch can be started from the command line as follows: - -[source,sh] --------------------------------------------- -./bin/elasticsearch --------------------------------------------- - -By default, Elasticsearch runs in the foreground, prints its logs to the -standard output (`stdout`), and can be stopped by pressing `Ctrl-C`. - -NOTE: All scripts packaged with Elasticsearch require a version of Bash -that supports arrays and assume that Bash is available at `/bin/bash`. -As such, Bash should be available at this path either directly or via a -symbolic link. +include::zip-targz-start.asciidoc[] include::check-running.asciidoc[] @@ -113,27 +98,7 @@ Log printing to `stdout` can be disabled using the `-q` or `--quiet` option on the command line. 
[[setup-installation-daemon]] -==== Running as a daemon - -To run Elasticsearch as a daemon, specify `-d` on the command line, and record -the process ID in a file using the `-p` option: - -[source,sh] --------------------------------------------- -./bin/elasticsearch -d -p pid --------------------------------------------- - -Log messages can be found in the `$ES_HOME/logs/` directory. - -To shut down Elasticsearch, kill the process ID recorded in the `pid` file: - -[source,sh] --------------------------------------------- -kill `cat pid` --------------------------------------------- - -NOTE: The startup scripts provided in the <> and <> -packages take care of starting and stopping the Elasticsearch process for you. +include::zip-targz-daemon.asciidoc[] [[zip-targz-configuring]] ==== Configuring Elasticsearch on the command line diff --git a/docs/reference/setup/install/zip-windows-start.asciidoc b/docs/reference/setup/install/zip-windows-start.asciidoc new file mode 100644 index 0000000000000..7ecea449d2895 --- /dev/null +++ b/docs/reference/setup/install/zip-windows-start.asciidoc @@ -0,0 +1,11 @@ +==== Running Elasticsearch from the command line + +Elasticsearch can be started from the command line as follows: + +[source,sh] +-------------------------------------------- +.\bin\elasticsearch.bat +-------------------------------------------- + +By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT`, +and can be stopped by pressing `Ctrl-C`. diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index cd86a6268911c..254fb63f6157d 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -58,17 +58,7 @@ include::xpack-indices.asciidoc[] endif::include-xpack[] [[windows-running]] -==== Running Elasticsearch from the command line - -Elasticsearch can be started from the command line as follows: - -[source,sh] --------------------------------------------- -.\bin\elasticsearch.bat --------------------------------------------- - -By default, Elasticsearch runs in the foreground, prints its logs to `STDOUT`, -and can be stopped by pressing `Ctrl-C`. +include::zip-windows-start.asciidoc[] [[windows-configuring]] ==== Configuring Elasticsearch on the command line diff --git a/docs/reference/setup/starting.asciidoc b/docs/reference/setup/starting.asciidoc new file mode 100644 index 0000000000000..6fab871e7c9ca --- /dev/null +++ b/docs/reference/setup/starting.asciidoc @@ -0,0 +1,72 @@ +[[starting-elasticsearch]] +== Starting Elasticsearch + +The method for starting {es} varies depending on how you installed it. + +[float] +[[start-targz]] +=== Archive packages (`.tar.gz`) + +If you installed {es} with a `.tar.gz` package, you can start {es} from the +command line. + +[float] +include::install/zip-targz-start.asciidoc[] + +[float] +include::install/zip-targz-daemon.asciidoc[] + +[float] +[[start-zip]] +=== Archive packages (`.zip`) + +If you installed {es} on Windows with a `.zip` package, you can start {es} from +the command line. If you want {es} to start automatically at boot time without +any user interaction, <>. 
+ +[float] +include::install/zip-windows-start.asciidoc[] + +[float] +[[start-deb]] +=== Debian packages + +include::install/init-systemd.asciidoc[] + +[float] +include::install/deb-init.asciidoc[] + +[float] +include::install/systemd.asciidoc[] + +[float] +[[start-docker]] +=== Docker images + +If you installed a Docker image, you can start {es} from the command line. There +are different methods depending on whether you're using development mode or +production mode. See <>. + +[float] +[[start-msi]] +=== MSI packages + +If you installed {es} on Windows using the `.msi` package, you can start {es} +from the command line. If you want it to start automatically at boot time +without any user interaction, +<>. + +[float] +include::install/msi-windows-start.asciidoc[] + +[float] +[[start-rpm]] +=== RPM packages + +include::install/init-systemd.asciidoc[] + +[float] +include::install/rpm-init.asciidoc[] + +[float] +include::install/systemd.asciidoc[] \ No newline at end of file From 42ff8aa2ef21d28d54d87b76f7e746a9f15334e8 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 3 Jul 2018 17:10:52 -0600 Subject: [PATCH 29/36] [DOCS] Add missing get mappings docs to HLRC (#31765) This commit adds the high-level rest client docs for the get mappings API that was added in #30889 --- docs/java-rest/high-level/supported-apis.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 0faf73e59074a..5308646eabad4 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -81,6 +81,7 @@ Index Management:: Mapping Management:: * <> +* <> * <> Alias Management:: From 32d67ef5048a6c72fa378bb6c8c5fdfb2c0a13e4 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 4 Jul 2018 08:04:07 +0200 Subject: [PATCH 30/36] Fixture for Minio testing (#31688) Adds a Minio fixture to run the S3 repository tests against Minio. Also collapses the single qa subproject into the s3-repository project, which simplifies the code structure (having it all in one place) and helps to avoid having too many Gradle subprojects. 
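For anyone trying the fixture locally: the build starts a Minio server on a fixed local address with the hard-coded test credentials defined below, and the test cluster's S3 client is pointed at that endpoint. A minimal, illustrative Java sketch of such a client follows. It is not part of this change; it assumes the AWS SDK v1 classes that the repository-s3 plugin already depends on, and the endpoint, credentials and bucket name are the fixture values from the build script. The signing region is an arbitrary assumption, since Minio does not check it.

    import com.amazonaws.auth.AWSStaticCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.client.builder.AwsClientBuilder;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;

    public class MinioSmokeTest {
        public static void main(String[] args) {
            // Credentials and endpoint match the hard-coded fixture values in build.gradle
            AmazonS3 client = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(
                    "s3_integration_test_permanent_access_key",
                    "s3_integration_test_permanent_secret_key")))
                // "us-east-1" is an assumption; any signing region works against Minio
                .withEndpointConfiguration(
                    new AwsClientBuilder.EndpointConfiguration("http://127.0.0.1:60920", "us-east-1"))
                // with a plain IP endpoint the client must use path-style bucket access
                .withPathStyleAccessEnabled(true)
                .build();
            System.out.println(client.listObjects("permanent-bucket-test").getObjectSummaries());
        }
    }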
--- plugins/repository-s3/build.gradle | 250 +++++++++++++++++- .../repository-s3/qa/amazon-s3/build.gradle | 112 -------- ...azonS3RepositoryClientYamlTestSuiteIT.java | 37 --- plugins/repository-s3/qa/build.gradle | 0 .../repositories/s3/AmazonS3Fixture.java | 0 .../20_repository_permanent_credentials.yml} | 8 +- .../30_repository_temporary_credentials.yml} | 8 +- 7 files changed, 257 insertions(+), 158 deletions(-) delete mode 100644 plugins/repository-s3/qa/amazon-s3/build.gradle delete mode 100644 plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java delete mode 100644 plugins/repository-s3/qa/build.gradle rename plugins/repository-s3/{qa/amazon-s3 => }/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java (100%) rename plugins/repository-s3/{qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository_permanent_credentials.yml => src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml} (96%) rename plugins/repository-s3/{qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml => src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml} (96%) diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 8448b2ab9e1ac..dc2140a6086a4 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,3 +1,12 @@ +import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.LoggedExec +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture +import org.elasticsearch.gradle.test.ClusterConfiguration +import org.elasticsearch.gradle.test.RestIntegTestTask + +import java.lang.reflect.Field + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -64,14 +73,245 @@ test { exclude '**/*CredentialsTests.class' } -check { - // also execute the QA tests when testing the plugin - dependsOn 'qa:amazon-s3:check' +boolean useFixture = false + +// We test against two repositories, one which uses the usual two-part "permanent" credentials and +// the other which uses three-part "temporary" or "session" credentials. + +String s3PermanentAccessKey = System.getenv("amazon_s3_access_key") +String s3PermanentSecretKey = System.getenv("amazon_s3_secret_key") +String s3PermanentBucket = System.getenv("amazon_s3_bucket") +String s3PermanentBasePath = System.getenv("amazon_s3_base_path") + +String s3TemporaryAccessKey = System.getenv("amazon_s3_access_key_temporary") +String s3TemporarySecretKey = System.getenv("amazon_s3_secret_key_temporary") +String s3TemporarySessionToken = System.getenv("amazon_s3_session_token_temporary") +String s3TemporaryBucket = System.getenv("amazon_s3_bucket_temporary") +String s3TemporaryBasePath = System.getenv("amazon_s3_base_path_temporary") + +// If all these variables are missing then we are testing against the internal fixture instead, which has the following +// credentials hard-coded in. 
+ +if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath + && !s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) { + + s3PermanentAccessKey = 's3_integration_test_permanent_access_key' + s3PermanentSecretKey = 's3_integration_test_permanent_secret_key' + s3PermanentBucket = 'permanent-bucket-test' + s3PermanentBasePath = 'integration_test' + + s3TemporaryAccessKey = 's3_integration_test_temporary_access_key' + s3TemporarySecretKey = 's3_integration_test_temporary_secret_key' + s3TemporaryBucket = 'temporary-bucket-test' + s3TemporaryBasePath = 'integration_test' + s3TemporarySessionToken = 's3_integration_test_temporary_session_token' + + useFixture = true +} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath + || !s3TemporaryAccessKey || !s3TemporarySecretKey || !s3TemporaryBucket || !s3TemporaryBasePath || !s3TemporarySessionToken) { + throw new IllegalArgumentException("not all options specified to run against external S3 service") +} + +final String minioVersion = 'RELEASE.2018-06-22T23-48-46Z' +final String minioBinDir = "${buildDir}/minio/bin" +final String minioDataDir = "${buildDir}/minio/data" +final String minioAddress = "127.0.0.1:60920" + +final String minioDistribution +final String minioCheckSum +if (Os.isFamily(Os.FAMILY_MAC)) { + minioDistribution = 'darwin-amd64' + minioCheckSum = '96b0bcb2f590e8e65fb83d5c3e221f9bd1106b49fa6f22c6b726b80b845d7c60' +} else if (Os.isFamily(Os.FAMILY_UNIX)) { + minioDistribution = 'linux-amd64' + minioCheckSum = '713dac7c105285eab3b92649be92b5e793b29d3525c7929fa7aaed99374fad99' +} else { + minioDistribution = null + minioCheckSum = null +} + +buildscript { + repositories { + maven { + url 'https://plugins.gradle.org/m2/' + } + } + dependencies { + classpath 'de.undercouch:gradle-download-task:3.4.3' + } +} + +if (useFixture && minioDistribution) { + apply plugin: 'de.undercouch.download' + + final String minioFileName = "minio.${minioVersion}" + final String minioDownloadURL = "https://dl.minio.io/server/minio/release/${minioDistribution}/archive/${minioFileName}" + final String minioFilePath = "${gradle.gradleUserHomeDir}/downloads/minio/${minioDistribution}/${minioFileName}" + + task downloadMinio(type: Download) { + src minioDownloadURL + dest minioFilePath + onlyIfModified true + } + + task verifyMinioChecksum(type: Verify, dependsOn: downloadMinio) { + src minioFilePath + algorithm 'SHA-256' + checksum minioCheckSum + } + + task installMinio(type: Sync, dependsOn: verifyMinioChecksum) { + from minioFilePath + into minioBinDir + fileMode 0755 + } + + task startMinio { + dependsOn installMinio + + ext.minioPid = 0L + + doLast { + new File("${minioDataDir}/${s3PermanentBucket}").mkdirs() + // we skip these tests on Windows so we do not need to worry about compatibility here + final ProcessBuilder minio = new ProcessBuilder( + "${minioBinDir}/${minioFileName}", + "server", + "--address", + minioAddress, + minioDataDir) + minio.environment().put('MINIO_ACCESS_KEY', s3PermanentAccessKey) + minio.environment().put('MINIO_SECRET_KEY', s3PermanentSecretKey) + final Process process = minio.start() + if (JavaVersion.current() <= JavaVersion.VERSION_1_8) { + try { + Class cProcessImpl = process.getClass() + Field fPid = cProcessImpl.getDeclaredField("pid") + if (!fPid.isAccessible()) { + fPid.setAccessible(true) + } + minioPid = fPid.getInt(process) + } catch (Exception e) { +
logger.error("failed to read pid from minio process", e) + process.destroyForcibly() + throw e + } + } else { + minioPid = process.pid() + } + + new BufferedReader(new InputStreamReader(process.getInputStream())).withReader { br -> + String line + int httpPort = 0 + while ((line = br.readLine()) != null) { + logger.info(line) + if (line.matches('.*Endpoint.*:\\d+$')) { + assert httpPort == 0 + final int index = line.lastIndexOf(":") + assert index >= 0 + httpPort = Integer.parseInt(line.substring(index + 1)) + + final File script = new File(project.buildDir, "minio/minio.killer.sh") + script.setText( + ["function shutdown {", + " kill ${minioPid}", + "}", + "trap shutdown EXIT", + // will wait indefinitely for input, but we never pass input, and the pipe is only closed when the build dies + "read line\n"].join('\n'), 'UTF-8') + final ProcessBuilder killer = new ProcessBuilder("bash", script.absolutePath) + killer.start() + break + } + } + + assert httpPort > 0 + } + } + } + + task stopMinio(type: LoggedExec) { + onlyIf { startMinio.minioPid > 0 } + + doFirst { + logger.info("Shutting down minio with pid ${startMinio.minioPid}") + } + + final Object pid = "${ -> startMinio.minioPid }" + + // we skip these tests on Windows so we do no need to worry about compatibility here + executable = 'kill' + args('-9', pid) + } + + RestIntegTestTask integTestMinio = project.tasks.create('integTestMinio', RestIntegTestTask.class) { + description = "Runs REST tests using the Minio repository." + } + + // The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: + project.afterEvaluate { + ClusterConfiguration cluster = project.extensions.getByName('integTestMinioCluster') as ClusterConfiguration + cluster.dependsOn(project.bundlePlugin) + cluster.keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + cluster.keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey + + cluster.setting 's3.client.integration_test_permanent.endpoint', "http://${minioAddress}" + + Task restIntegTestTask = project.tasks.getByName('integTestMinio') + restIntegTestTask.clusterConfig.plugin(project.path) + + // Default jvm arguments for all test clusters + String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + + " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + + " " + System.getProperty('tests.jvm.argline', '') + + restIntegTestTask.clusterConfig.jvmArgs = jvmArgs + } + + integTestMinioRunner.dependsOn(startMinio) + integTestMinioRunner.finalizedBy(stopMinio) + // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 + integTestMinioRunner.systemProperty 'tests.rest.blacklist', 'repository_s3/30_repository_temporary_credentials/*' + + project.check.dependsOn(integTestMinio) +} + +/** A task to start the AmazonS3Fixture which emulates an S3 service **/ +task s3Fixture(type: AntFixture) { + dependsOn testClasses + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3PermanentBucket, s3TemporaryBucket +} + +Map expansions = [ + 'permanent_bucket': s3PermanentBucket, + 'permanent_base_path': s3PermanentBasePath, + 'temporary_bucket': s3TemporaryBucket, + 'temporary_base_path': s3TemporaryBasePath +] + +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, 
expansions) } integTestCluster { - keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key" - keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key" + keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey + + keystoreSetting 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey + keystoreSetting 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey + keystoreSetting 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken + + if (useFixture) { + dependsOn s3Fixture + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 's3.client.integration_test_permanent.endpoint', "http://${-> s3Fixture.addressAndPort}" + setting 's3.client.integration_test_temporary.endpoint', "http://${-> s3Fixture.addressAndPort}" + } else { + println "Using an external service to test the repository-s3 plugin" + } } thirdPartyAudit.excludes = [ diff --git a/plugins/repository-s3/qa/amazon-s3/build.gradle b/plugins/repository-s3/qa/amazon-s3/build.gradle deleted file mode 100644 index b6cc4a6de310d..0000000000000 --- a/plugins/repository-s3/qa/amazon-s3/build.gradle +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import org.elasticsearch.gradle.MavenFilteringHack -import org.elasticsearch.gradle.test.AntFixture - -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' - -dependencies { - testCompile project(path: ':plugins:repository-s3', configuration: 'runtime') -} - -integTestCluster { - plugin ':plugins:repository-s3' -} - -forbiddenApisTest { - // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage - bundledSignatures -= 'jdk-non-portable' - bundledSignatures += 'jdk-internal' -} - -boolean useFixture = false - -// We test against two repositories, one which uses the usual two-part "permanent" credentials and -// the other which uses three-part "temporary" or "session" credentials. 
- -String s3PermanentAccessKey = System.getenv("amazon_s3_access_key") -String s3PermanentSecretKey = System.getenv("amazon_s3_secret_key") -String s3PermanentBucket = System.getenv("amazon_s3_bucket") -String s3PermanentBasePath = System.getenv("amazon_s3_base_path") - -String s3TemporaryAccessKey = System.getenv("amazon_s3_access_key_temporary") -String s3TemporarySecretKey = System.getenv("amazon_s3_secret_key_temporary") -String s3TemporarySessionToken = System.getenv("amazon_s3_session_token_temporary") -String s3TemporaryBucket = System.getenv("amazon_s3_bucket_temporary") -String s3TemporaryBasePath = System.getenv("amazon_s3_base_path_temporary") - -// If all these variables are missing then we are testing against the internal fixture instead, which has the following -// credentials hard-coded in. - -if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath - && !s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) { - - s3PermanentAccessKey = 's3_integration_test_permanent_access_key' - s3PermanentSecretKey = 's3_integration_test_permanent_secret_key' - s3PermanentBucket = 'permanent_bucket_test' - s3PermanentBasePath = 'integration_test' - - s3TemporaryAccessKey = 's3_integration_test_temporary_access_key' - s3TemporarySecretKey = 's3_integration_test_temporary_secret_key' - s3TemporaryBucket = 'temporary_bucket_test' - s3TemporaryBasePath = 'integration_test' - s3TemporarySessionToken = 's3_integration_test_temporary_session_token' - - useFixture = true -} - -/** A task to start the AmazonS3Fixture which emulates a S3 service **/ -task s3Fixture(type: AntFixture) { - dependsOn compileTestJava - env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" - executable = new File(project.runtimeJavaHome, 'bin/java') - args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3PermanentBucket, s3TemporaryBucket -} - -Map expansions = [ - 'permanent_bucket': s3PermanentBucket, - 'permanent_base_path': s3PermanentBasePath, - 'temporary_bucket': s3TemporaryBucket, - 'temporary_base_path': s3TemporaryBasePath -] -processTestResources { - inputs.properties(expansions) - MavenFilteringHack.filter(it, expansions) -} - -integTestCluster { - keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - - keystoreSetting 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey - keystoreSetting 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey - keystoreSetting 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken - - if (useFixture) { - println "Using internal test service to test the repository-s3 plugin" - dependsOn s3Fixture - /* Use a closure on the string to delay evaluation until tests are executed */ - setting 's3.client.integration_test_permanent.endpoint', "http://${-> s3Fixture.addressAndPort}" - setting 's3.client.integration_test_temporary.endpoint', "http://${-> s3Fixture.addressAndPort}" - } else { - println "Using an external service to test the repository-s3 plugin" - } -} \ No newline at end of file diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java 
deleted file mode 100644 index afcc0fa353482..0000000000000 --- a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3RepositoryClientYamlTestSuiteIT.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -public class AmazonS3RepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - - public AmazonS3RepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } -} diff --git a/plugins/repository-s3/qa/build.gradle b/plugins/repository-s3/qa/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java similarity index 100% rename from plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java rename to plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository_permanent_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml similarity index 96% rename from plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository_permanent_credentials.yml rename to plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index bb934d0931ca9..aa9d05e0579e3 100644 --- a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/10_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -17,7 +17,7 @@ setup: storage_class: standard --- -"Snapshot/Restore with repository-s3 using permanent credentials": +"Snapshot and Restore with repository-s3 using permanent credentials": # Get repository - do: @@ -183,6 +183,10 @@ setup: --- "Register a repository with a non existing bucket": + - skip: + version: all + reason: to be fixed + - do: catch: /repository_exception/ 
snapshot.create_repository: @@ -191,7 +195,7 @@ setup: type: s3 settings: bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test + client: integration_test_permanent --- "Register a repository with a non existing client": diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml similarity index 96% rename from plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml rename to plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 5da4f739cd522..61ec7722903b6 100644 --- a/plugins/repository-s3/qa/amazon-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_temporary_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -17,7 +17,7 @@ setup: storage_class: standard --- -"Snapshot/Restore with repository-s3 using temporary credentials": +"Snapshot and Restore with repository-s3 using temporary credentials": # Get repository - do: @@ -183,6 +183,10 @@ setup: --- "Register a repository with a non existing bucket": + - skip: + version: all + reason: to be fixed + + - do: catch: /repository_exception/ snapshot.create_repository: @@ -191,7 +195,7 @@ setup: type: s3 settings: bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE - client: integration_test + client: integration_test_temporary --- "Register a repository with a non existing client": From e9f8442beeea4bcd34d442a5ee7e523779dccaf2 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Wed, 4 Jul 2018 08:15:45 +0200 Subject: [PATCH 31/36] [ML] Return statistics about forecasts as part of the jobsstats and usage API (#31647) This change adds stats about forecasts to the job stats API as well as to xpack/_usage.
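To give a feel for how the numbers are combined, here is an illustrative sketch only, not code from this change; the real aggregation lives in TransportGetJobsStatsAction and MachineLearningFeatureSet, using the ForecastStats class added below:

    import java.util.List;
    import org.elasticsearch.xpack.core.ml.stats.ForecastStats;

    public class ForecastStatsRollUp {
        // Per-job forecast statistics are merged pairwise into one overall view;
        // merge() sums the totals and combines min/max/avg and status counts.
        static ForecastStats rollUp(List<ForecastStats> perJobStats) {
            ForecastStats overall = new ForecastStats(); // empty: zero forecasts, zero jobs
            for (ForecastStats stats : perJobStats) {
                overall.merge(stats);
            }
            return overall;
        }
    }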
The following information is collected: _xpack/ml/anomaly_detectors/{jobid|_all}/_stats: - total number of forecasts - memory statistics (mean/min/max) - runtime statistics - record statistics - counts by status _xpack/usage - collected by job status as well as overall (_all): - total number of forecasts - number of jobs that have at least 1 forecast - memory, runtime, record statistics - counts by status Fixes #31395 --- x-pack/docs/en/rest-api/ml/jobcounts.asciidoc | 31 +++ .../ml/MachineLearningFeatureSetUsage.java | 1 + .../core/ml/action/GetJobsStatsAction.java | 27 +- .../xpack/core/ml/stats/CountAccumulator.java | 82 ++++++ .../xpack/core/ml/stats/ForecastStats.java | 152 +++++++++++ .../xpack/core/ml/stats/StatsAccumulator.java | 126 +++++++++ .../GetJobStatsActionResponseTests.java | 11 +- .../core/ml/stats/CountAccumulatorTests.java | 100 +++++++ .../core/ml/stats/ForecastStatsTests.java | 254 ++++++++++++++++++ .../core/ml/stats/StatsAccumulatorTests.java | 160 +++++++++++ .../xpack/ml/MachineLearningFeatureSet.java | 16 +- .../action/TransportGetJobsStatsAction.java | 46 ++-- .../xpack/ml/job/persistence/JobProvider.java | 53 ++++ .../xpack/ml/utils/StatsAccumulator.java | 57 ---- .../ml/MachineLearningFeatureSetTests.java | 27 +- .../TransportGetJobsStatsActionTests.java | 17 +- .../xpack/ml/utils/StatsAccumulatorTests.java | 63 ----- .../ml/JobStatsMonitoringDocTests.java | 8 +- 18 files changed, 1071 insertions(+), 160 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/ForecastStats.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulatorTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/ForecastStatsTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java delete mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/StatsAccumulator.java delete mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/StatsAccumulatorTests.java diff --git a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc b/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc index b2e24a298cbd0..d343cc23ae0ad 100644 --- a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc +++ b/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc @@ -20,6 +20,10 @@ progress of a job. (object) An object that provides information about the size and contents of the model. See <> +`forecasts_stats`:: + (object) An object that provides statistical information about forecasts + of this job. See <> + `node`:: (object) For open jobs only, contains information about the node where the job runs. See <>. @@ -177,6 +181,33 @@ NOTE: The `over` field values are counted separately for each detector and parti `timestamp`:: (date) The timestamp of the `model_size_stats` according to the timestamp of the data. +[float] +[[ml-forecastsstats]] +==== Forecasts Stats Objects + +The `forecasts_stats` object shows statistics about forecasts. It has the following properties: + +`total`:: + (long) The number of forecasts currently available for this model. + +`forecasted_jobs`:: + (long) The number of jobs that have at least one forecast. 
+ +`memory_bytes`:: + (object) Statistics about the memory usage: minimum, maximum, average and total. + +`records`:: + (object) Statistics about the number of forecast records: minimum, maximum, average and total. + +`processing_time_ms`:: + (object) Statistics about the forecast runtime in milliseconds: minimum, maximum, average and total. + +`status`:: + (object) Counts per forecast status, for example: {"finished" : 2}. + +NOTE: `memory_bytes`, `records`, `processing_time_ms` and `status` require at least 1 forecast, otherwise +these fields are omitted. + [float] [[ml-stats-node]] ==== Node Objects diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java index 1779ca703a5d7..ebcaab8495eba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -22,6 +22,7 @@ public class MachineLearningFeatureSetUsage extends XPackFeatureSet.Usage { public static final String DATAFEEDS_FIELD = "datafeeds"; public static final String COUNT = "count"; public static final String DETECTORS = "detectors"; + public static final String FORECASTS = "forecasts"; public static final String MODEL_SIZE = "model_size"; private final Map jobsUsage; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index ad34f5611383f..807c09363759b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; @@ -46,6 +47,7 @@ public class GetJobsStatsAction extends Action { private static final String DATA_COUNTS = "data_counts"; private static final String MODEL_SIZE_STATS = "model_size_stats"; + private static final String FORECASTS_STATS = "forecasts_stats"; private static final String STATE = "state"; private static final String NODE = "node"; @@ -154,6 +156,8 @@ public static class JobStats implements ToXContentObject, Writeable { @Nullable private ModelSizeStats modelSizeStats; @Nullable + private ForecastStats forecastStats; + @Nullable private TimeValue openTime; private JobState state; @Nullable @@ -161,11 +165,13 @@ public static class JobStats implements ToXContentObject, Writeable { @Nullable private String assignmentExplanation; - public JobStats(String jobId, DataCounts dataCounts, @Nullable ModelSizeStats modelSizeStats, JobState state, - @Nullable DiscoveryNode node, @Nullable String assignmentExplanation, @Nullable TimeValue opentime) { + public JobStats(String jobId, DataCounts dataCounts, @Nullable ModelSizeStats modelSizeStats, + @Nullable ForecastStats forecastStats, JobState state, @Nullable DiscoveryNode node, + @Nullable String assignmentExplanation, @Nullable TimeValue opentime) { this.jobId =
Objects.requireNonNull(jobId); this.dataCounts = Objects.requireNonNull(dataCounts); this.modelSizeStats = modelSizeStats; + this.forecastStats = forecastStats; this.state = Objects.requireNonNull(state); this.node = node; this.assignmentExplanation = assignmentExplanation; @@ -180,6 +186,9 @@ public JobStats(StreamInput in) throws IOException { node = in.readOptionalWriteable(DiscoveryNode::new); assignmentExplanation = in.readOptionalString(); openTime = in.readOptionalTimeValue(); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + forecastStats = in.readOptionalWriteable(ForecastStats::new); + } } public String getJobId() { @@ -193,6 +202,10 @@ public DataCounts getDataCounts() { public ModelSizeStats getModelSizeStats() { return modelSizeStats; } + + public ForecastStats getForecastStats() { + return forecastStats; + } public JobState getState() { return state; @@ -226,6 +239,10 @@ public XContentBuilder toUnwrappedXContent(XContentBuilder builder) throws IOExc if (modelSizeStats != null) { builder.field(MODEL_SIZE_STATS, modelSizeStats); } + if (forecastStats != null) { + builder.field(FORECASTS_STATS, forecastStats); + } + builder.field(STATE, state.toString()); if (node != null) { builder.startObject(NODE); @@ -259,11 +276,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(node); out.writeOptionalString(assignmentExplanation); out.writeOptionalTimeValue(openTime); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeOptionalWriteable(forecastStats); + } } @Override public int hashCode() { - return Objects.hash(jobId, dataCounts, modelSizeStats, state, node, assignmentExplanation, openTime); + return Objects.hash(jobId, dataCounts, modelSizeStats, forecastStats, state, node, assignmentExplanation, openTime); } @Override @@ -278,6 +298,7 @@ public boolean equals(Object obj) { return Objects.equals(jobId, other.jobId) && Objects.equals(this.dataCounts, other.dataCounts) && Objects.equals(this.modelSizeStats, other.modelSizeStats) + && Objects.equals(this.forecastStats, other.forecastStats) && Objects.equals(this.state, other.state) && Objects.equals(this.node, other.node) && Objects.equals(this.assignmentExplanation, other.assignmentExplanation) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java new file mode 100644 index 0000000000000..638aa8a2fa6be --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ml.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Map.Entry; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * An accumulator for simple counts where statistical measures + * are not of interest. 
+ */ +public class CountAccumulator implements Writeable { + + private Map counts; + + public CountAccumulator() { + this.counts = new HashMap(); + } + + private CountAccumulator(Map counts) { + this.counts = counts; + } + + public CountAccumulator(StreamInput in) throws IOException { + this.counts = in.readMap(StreamInput::readString, StreamInput::readLong); + } + + public void merge(CountAccumulator other) { + counts = Stream.of(counts, other.counts).flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Entry::getKey, Entry::getValue, (x, y) -> x + y)); + } + + public void add(String key, Long count) { + counts.put(key, counts.getOrDefault(key, 0L) + count); + } + + public Map asMap() { + return counts; + } + + public static CountAccumulator fromTermsAggregation(StringTerms termsAggregation) { + return new CountAccumulator(termsAggregation.getBuckets().stream() + .collect(Collectors.toMap(bucket -> bucket.getKeyAsString(), bucket -> bucket.getDocCount()))); + } + + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(counts, StreamOutput::writeString, StreamOutput::writeLong); + } + + @Override + public int hashCode() { + return Objects.hash(counts); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + CountAccumulator other = (CountAccumulator) obj; + return Objects.equals(counts, other.counts); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/ForecastStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/ForecastStats.java new file mode 100644 index 0000000000000..d490e4b98a44a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/ForecastStats.java @@ -0,0 +1,152 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ml.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * A class to hold statistics about forecasts. + */ +public class ForecastStats implements ToXContentObject, Writeable { + + public static class Fields { + public static final String TOTAL = "total"; + public static final String FORECASTED_JOBS = "forecasted_jobs"; + public static final String MEMORY = "memory_bytes"; + public static final String RUNTIME = "processing_time_ms"; + public static final String RECORDS = "records"; + public static final String STATUSES = "status"; + } + + private long total; + private long forecastedJobs; + private StatsAccumulator memoryStats; + private StatsAccumulator recordStats; + private StatsAccumulator runtimeStats; + private CountAccumulator statusCounts; + + public ForecastStats() { + this.total = 0; + this.forecastedJobs = 0; + this.memoryStats = new StatsAccumulator(); + this.recordStats = new StatsAccumulator(); + this.runtimeStats = new StatsAccumulator(); + this.statusCounts = new CountAccumulator(); + } + + /* + * Construct ForecastStats for 1 job. 
Additional statistics can be added by merging other ForecastStats into it. + */ + public ForecastStats(long total, StatsAccumulator memoryStats, StatsAccumulator recordStats, StatsAccumulator runtimeStats, + CountAccumulator statusCounts) { + this.total = total; + this.forecastedJobs = total > 0 ? 1 : 0; + this.memoryStats = Objects.requireNonNull(memoryStats); + this.recordStats = Objects.requireNonNull(recordStats); + this.runtimeStats = Objects.requireNonNull(runtimeStats); + this.statusCounts = Objects.requireNonNull(statusCounts); + } + + public ForecastStats(StreamInput in) throws IOException { + this.total = in.readLong(); + this.forecastedJobs = in.readLong(); + this.memoryStats = new StatsAccumulator(in); + this.recordStats = new StatsAccumulator(in); + this.runtimeStats = new StatsAccumulator(in); + this.statusCounts = new CountAccumulator(in); + } + + public ForecastStats merge(ForecastStats other) { + if (other == null) { + return this; + } + total += other.total; + forecastedJobs += other.forecastedJobs; + memoryStats.merge(other.memoryStats); + recordStats.merge(other.recordStats); + runtimeStats.merge(other.runtimeStats); + statusCounts.merge(other.statusCounts); + + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + doXContentBody(builder, params); + return builder.endObject(); + } + + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(Fields.TOTAL, total); + builder.field(Fields.FORECASTED_JOBS, forecastedJobs); + + if (total > 0) { + builder.field(Fields.MEMORY, memoryStats.asMap()); + builder.field(Fields.RECORDS, recordStats.asMap()); + builder.field(Fields.RUNTIME, runtimeStats.asMap()); + builder.field(Fields.STATUSES, statusCounts.asMap()); + } + + return builder; + } + + public Map asMap() { + Map map = new HashMap<>(); + map.put(Fields.TOTAL, total); + map.put(Fields.FORECASTED_JOBS, forecastedJobs); + + if (total > 0) { + map.put(Fields.MEMORY, memoryStats.asMap()); + map.put(Fields.RECORDS, recordStats.asMap()); + map.put(Fields.RUNTIME, runtimeStats.asMap()); + map.put(Fields.STATUSES, statusCounts.asMap()); + } + + return map; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(total); + out.writeLong(forecastedJobs); + memoryStats.writeTo(out); + recordStats.writeTo(out); + runtimeStats.writeTo(out); + statusCounts.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(total, forecastedJobs, memoryStats, recordStats, runtimeStats, statusCounts); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + ForecastStats other = (ForecastStats) obj; + return Objects.equals(total, other.total) && Objects.equals(forecastedJobs, other.forecastedJobs) + && Objects.equals(memoryStats, other.memoryStats) && Objects.equals(recordStats, other.recordStats) + && Objects.equals(runtimeStats, other.runtimeStats) && Objects.equals(statusCounts, other.statusCounts); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java new file mode 100644 index 0000000000000..fe987db48ce17 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulator.java @@ -0,0 
+1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.aggregations.metrics.stats.Stats; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Helper class to collect min, max, avg and total statistics for a quantity + */ +public class StatsAccumulator implements Writeable { + + public static class Fields { + public static final String MIN = "min"; + public static final String MAX = "max"; + public static final String AVG = "avg"; + public static final String TOTAL = "total"; + } + + private long count; + private double total; + private Double min; + private Double max; + + public StatsAccumulator() { + } + + public StatsAccumulator(StreamInput in) throws IOException { + count = in.readLong(); + total = in.readDouble(); + min = in.readOptionalDouble(); + max = in.readOptionalDouble(); + } + + private StatsAccumulator(long count, double total, double min, double max) { + this.count = count; + this.total = total; + this.min = min; + this.max = max; + } + + public void add(double value) { + count++; + total += value; + min = min == null ? value : (value < min ? value : min); + max = max == null ? value : (value > max ? value : max); + } + + public double getMin() { + return min == null ? 0.0 : min; + } + + public double getMax() { + return max == null ? 0.0 : max; + } + + public double getAvg() { + return count == 0.0 ? 0.0 : total/count; + } + + public double getTotal() { + return total; + } + + public void merge(StatsAccumulator other) { + count += other.count; + total += other.total; + + // note: not using Math.min/max as some internal prefetch optimization causes an NPE + min = min == null ? other.min : (other.min == null ? min : other.min < min ? other.min : min); + max = max == null ? other.max : (other.max == null ? max : other.max > max ? 
other.max : max); + } + + public Map asMap() { + Map map = new HashMap<>(); + map.put(Fields.MIN, getMin()); + map.put(Fields.MAX, getMax()); + map.put(Fields.AVG, getAvg()); + map.put(Fields.TOTAL, getTotal()); + return map; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(count); + out.writeDouble(total); + out.writeOptionalDouble(min); + out.writeOptionalDouble(max); + } + + public static StatsAccumulator fromStatsAggregation(Stats statsAggregation) { + return new StatsAccumulator(statsAggregation.getCount(), statsAggregation.getSum(), statsAggregation.getMin(), + statsAggregation.getMax()); + } + + @Override + public int hashCode() { + return Objects.hash(count, total, min, max); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + StatsAccumulator other = (StatsAccumulator) obj; + return Objects.equals(count, other.count) && Objects.equals(total, other.total) && Objects.equals(min, other.min) + && Objects.equals(max, other.max); + } +} + diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionResponseTests.java index ff979a8570aba..86a5b990728f8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetJobStatsActionResponseTests.java @@ -17,6 +17,8 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCountsTests; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStatsTests; import java.net.InetAddress; import java.util.ArrayList; @@ -42,6 +44,12 @@ protected Response createTestInstance() { if (randomBoolean()) { sizeStats = new ModelSizeStats.Builder("foo").build(); } + + ForecastStats forecastStats = null; + if (randomBoolean()) { + forecastStats = new ForecastStatsTests().createTestInstance(); + } + JobState jobState = randomFrom(EnumSet.allOf(JobState.class)); DiscoveryNode node = null; @@ -56,7 +64,8 @@ protected Response createTestInstance() { if (randomBoolean()) { openTime = parseTimeValue(randomPositiveTimeValue(), "open_time-Test"); } - Response.JobStats jobStats = new Response.JobStats(jobId, dataCounts, sizeStats, jobState, node, explanation, openTime); + Response.JobStats jobStats = new Response.JobStats(jobId, dataCounts, sizeStats, forecastStats, jobState, node, explanation, + openTime); jobStatsList.add(jobStats); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulatorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulatorTests.java new file mode 100644 index 0000000000000..4e18a70a3a0a2 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulatorTests.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.ml.stats; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms.Bucket; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CountAccumulatorTests extends AbstractWireSerializingTestCase { + + public void testEmpty() { + CountAccumulator accumulator = new CountAccumulator(); + assertEquals(Collections.emptyMap(), accumulator.asMap()); + } + + public void testAdd() { + CountAccumulator accumulator = new CountAccumulator(); + accumulator.add("a", 22L); + accumulator.add("a", 10L); + accumulator.add("a", 15L); + accumulator.add("a", -12L); + accumulator.add("a", 0L); + + accumulator.add("b", 13L); + accumulator.add("b", 1L); + accumulator.add("b", 40000L); + accumulator.add("b", -2L); + accumulator.add("b", 333L); + + assertEquals(35L, accumulator.asMap().get("a").longValue()); + assertEquals(40345L, accumulator.asMap().get("b").longValue()); + assertEquals(2, accumulator.asMap().size()); + } + + public void testMerge() { + CountAccumulator accumulator = new CountAccumulator(); + accumulator.add("a", 13L); + accumulator.add("b", 42L); + + CountAccumulator accumulator2 = new CountAccumulator(); + accumulator2.add("a", 12L); + accumulator2.add("c", -1L); + + accumulator.merge(accumulator2); + + assertEquals(25L, accumulator.asMap().get("a").longValue()); + assertEquals(42L, accumulator.asMap().get("b").longValue()); + assertEquals(-1L, accumulator.asMap().get("c").longValue()); + assertEquals(3, accumulator.asMap().size()); + } + + public void testFromTermsAggregation() { + StringTerms termsAggregation = mock(StringTerms.class); + + Bucket bucket1 = mock(Bucket.class); + when(bucket1.getKeyAsString()).thenReturn("a"); + when(bucket1.getDocCount()).thenReturn(10L); + + Bucket bucket2 = mock(Bucket.class); + when(bucket2.getKeyAsString()).thenReturn("b"); + when(bucket2.getDocCount()).thenReturn(33L); + + List buckets = Arrays.asList(bucket1, bucket2); + when(termsAggregation.getBuckets()).thenReturn(buckets); + + CountAccumulator accumulator = CountAccumulator.fromTermsAggregation(termsAggregation); + + assertEquals(10L, accumulator.asMap().get("a").longValue()); + assertEquals(33L, accumulator.asMap().get("b").longValue()); + assertEquals(2, accumulator.asMap().size()); + } + + @Override + public CountAccumulator createTestInstance() { + CountAccumulator accumulator = new CountAccumulator(); + for (int i = 0; i < randomInt(10); ++i) { + accumulator.add(randomAlphaOfLengthBetween(1, 20), randomLongBetween(1L, 100L)); + } + + return accumulator; + } + + @Override + protected Reader instanceReader() { + return CountAccumulator::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/ForecastStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/ForecastStatsTests.java new file mode 100644 index 0000000000000..f7f5d16c5e578 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/ForecastStatsTests.java @@ -0,0 +1,254 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ml.stats; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats.Fields; + +import java.io.IOException; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class ForecastStatsTests extends AbstractWireSerializingTestCase { + + public void testEmpty() throws IOException { + ForecastStats forecastStats = new ForecastStats(); + + XContentBuilder builder = JsonXContent.contentBuilder(); + forecastStats.toXContent(builder, ToXContent.EMPTY_PARAMS); + + XContentParser parser = createParser(builder); + Map properties = parser.map(); + assertTrue(properties.containsKey(Fields.TOTAL)); + assertTrue(properties.containsKey(Fields.FORECASTED_JOBS)); + assertFalse(properties.containsKey(Fields.MEMORY)); + assertFalse(properties.containsKey(Fields.RECORDS)); + assertFalse(properties.containsKey(Fields.RUNTIME)); + assertFalse(properties.containsKey(Fields.STATUSES)); + } + + public void testMerge() { + StatsAccumulator memoryStats = new StatsAccumulator(); + memoryStats.add(1000); + memoryStats.add(45000); + memoryStats.add(2300); + + StatsAccumulator recordStats = new StatsAccumulator(); + recordStats.add(10); + recordStats.add(0); + recordStats.add(20); + + StatsAccumulator runtimeStats = new StatsAccumulator(); + runtimeStats.add(0); + runtimeStats.add(0); + runtimeStats.add(10); + + CountAccumulator statusStats = new CountAccumulator(); + statusStats.add("finished", 2L); + statusStats.add("failed", 5L); + + ForecastStats forecastStats = new ForecastStats(3, memoryStats, recordStats, runtimeStats, statusStats); + + StatsAccumulator memoryStats2 = new StatsAccumulator(); + memoryStats2.add(10); + memoryStats2.add(30); + + StatsAccumulator recordStats2 = new StatsAccumulator(); + recordStats2.add(10); + recordStats2.add(0); + + StatsAccumulator runtimeStats2 = new StatsAccumulator(); + runtimeStats2.add(96); + runtimeStats2.add(0); + + CountAccumulator statusStats2 = new CountAccumulator(); + statusStats2.add("finished", 2L); + statusStats2.add("scheduled", 1L); + + ForecastStats forecastStats2 = new ForecastStats(2, memoryStats2, recordStats2, runtimeStats2, statusStats2); + + forecastStats.merge(forecastStats2); + + Map mergedStats = forecastStats.asMap(); + + assertEquals(2L, mergedStats.get(Fields.FORECASTED_JOBS)); + assertEquals(5L, mergedStats.get(Fields.TOTAL)); + + @SuppressWarnings("unchecked") + Map mergedMemoryStats = (Map) mergedStats.get(Fields.MEMORY); + + assertTrue(mergedMemoryStats != null); + assertThat(mergedMemoryStats.get(StatsAccumulator.Fields.AVG), equalTo(9668.0)); + assertThat(mergedMemoryStats.get(StatsAccumulator.Fields.MAX), equalTo(45000.0)); + assertThat(mergedMemoryStats.get(StatsAccumulator.Fields.MIN), equalTo(10.0)); + + @SuppressWarnings("unchecked") + Map mergedRecordStats = (Map) mergedStats.get(Fields.RECORDS); + + assertTrue(mergedRecordStats != null); + assertThat(mergedRecordStats.get(StatsAccumulator.Fields.AVG), equalTo(8.0)); + 
assertThat(mergedRecordStats.get(StatsAccumulator.Fields.MAX), equalTo(20.0)); + assertThat(mergedRecordStats.get(StatsAccumulator.Fields.MIN), equalTo(0.0)); + + @SuppressWarnings("unchecked") + Map mergedRuntimeStats = (Map) mergedStats.get(Fields.RUNTIME); + + assertTrue(mergedRuntimeStats != null); + assertThat(mergedRuntimeStats.get(StatsAccumulator.Fields.AVG), equalTo(21.2)); + assertThat(mergedRuntimeStats.get(StatsAccumulator.Fields.MAX), equalTo(96.0)); + assertThat(mergedRuntimeStats.get(StatsAccumulator.Fields.MIN), equalTo(0.0)); + + @SuppressWarnings("unchecked") + Map mergedCountStats = (Map) mergedStats.get(Fields.STATUSES); + + assertTrue(mergedCountStats != null); + assertEquals(3, mergedCountStats.size()); + assertEquals(4, mergedCountStats.get("finished").longValue()); + assertEquals(5, mergedCountStats.get("failed").longValue()); + assertEquals(1, mergedCountStats.get("scheduled").longValue()); + } + + public void testChainedMerge() { + StatsAccumulator memoryStats = new StatsAccumulator(); + memoryStats.add(1000); + memoryStats.add(45000); + memoryStats.add(2300); + StatsAccumulator recordStats = new StatsAccumulator(); + recordStats.add(10); + recordStats.add(0); + recordStats.add(20); + StatsAccumulator runtimeStats = new StatsAccumulator(); + runtimeStats.add(0); + runtimeStats.add(0); + runtimeStats.add(10); + CountAccumulator statusStats = new CountAccumulator(); + statusStats.add("finished", 2L); + statusStats.add("failed", 5L); + ForecastStats forecastStats = new ForecastStats(3, memoryStats, recordStats, runtimeStats, statusStats); + + StatsAccumulator memoryStats2 = new StatsAccumulator(); + memoryStats2.add(10); + memoryStats2.add(30); + StatsAccumulator recordStats2 = new StatsAccumulator(); + recordStats2.add(10); + recordStats2.add(0); + StatsAccumulator runtimeStats2 = new StatsAccumulator(); + runtimeStats2.add(96); + runtimeStats2.add(0); + CountAccumulator statusStats2 = new CountAccumulator(); + statusStats2.add("finished", 2L); + statusStats2.add("scheduled", 1L); + ForecastStats forecastStats2 = new ForecastStats(2, memoryStats2, recordStats2, runtimeStats2, statusStats2); + + StatsAccumulator memoryStats3 = new StatsAccumulator(); + memoryStats3.add(500); + StatsAccumulator recordStats3 = new StatsAccumulator(); + recordStats3.add(50); + StatsAccumulator runtimeStats3 = new StatsAccumulator(); + runtimeStats3.add(32); + CountAccumulator statusStats3 = new CountAccumulator(); + statusStats3.add("finished", 1L); + ForecastStats forecastStats3 = new ForecastStats(1, memoryStats3, recordStats3, runtimeStats3, statusStats3); + + ForecastStats forecastStats4 = new ForecastStats(); + + // merge 4 into 3 + forecastStats3.merge(forecastStats4); + + // merge 3 into 2 + forecastStats2.merge(forecastStats3); + + // merge 2 into 1 + forecastStats.merge(forecastStats2); + + Map mergedStats = forecastStats.asMap(); + + assertEquals(3L, mergedStats.get(Fields.FORECASTED_JOBS)); + assertEquals(6L, mergedStats.get(Fields.TOTAL)); + + @SuppressWarnings("unchecked") + Map mergedMemoryStats = (Map) mergedStats.get(Fields.MEMORY); + + assertTrue(mergedMemoryStats != null); + assertThat(mergedMemoryStats.get(StatsAccumulator.Fields.AVG), equalTo(8140.0)); + assertThat(mergedMemoryStats.get(StatsAccumulator.Fields.MAX), equalTo(45000.0)); + assertThat(mergedMemoryStats.get(StatsAccumulator.Fields.MIN), equalTo(10.0)); + + @SuppressWarnings("unchecked") + Map mergedRecordStats = (Map) mergedStats.get(Fields.RECORDS); + + assertTrue(mergedRecordStats != null); +
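The chained expectations likewise follow from pooling all six inputs: memory avg = (1000 + 45000 + 2300 + 10 + 30 + 500) / 6 = 8140.0, record avg = (10 + 0 + 20 + 10 + 0 + 50) / 6 = 15.0, runtime avg = (0 + 0 + 10 + 96 + 0 + 32) / 6 = 23.0; the statuses pool to finished = 2 + 2 + 1 = 5, failed = 5 and scheduled = 1. The empty forecastStats4 contributes nothing, so the merged FORECASTED_JOBS is 3 and TOTAL is 3 + 2 + 1 = 6.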
assertThat(mergedRecordStats.get(StatsAccumulator.Fields.AVG), equalTo(15.0)); + assertThat(mergedRecordStats.get(StatsAccumulator.Fields.MAX), equalTo(50.0)); + assertThat(mergedRecordStats.get(StatsAccumulator.Fields.MIN), equalTo(0.0)); + + @SuppressWarnings("unchecked") + Map mergedRuntimeStats = (Map) mergedStats.get(Fields.RUNTIME); + + assertTrue(mergedRuntimeStats != null); + assertThat(mergedRuntimeStats.get(StatsAccumulator.Fields.AVG), equalTo(23.0)); + assertThat(mergedRuntimeStats.get(StatsAccumulator.Fields.MAX), equalTo(96.0)); + assertThat(mergedRuntimeStats.get(StatsAccumulator.Fields.MIN), equalTo(0.0)); + + @SuppressWarnings("unchecked") + Map mergedCountStats = (Map) mergedStats.get(Fields.STATUSES); + + assertTrue(mergedCountStats != null); + assertEquals(3, mergedCountStats.size()); + assertEquals(5, mergedCountStats.get("finished").longValue()); + assertEquals(5, mergedCountStats.get("failed").longValue()); + assertEquals(1, mergedCountStats.get("scheduled").longValue()); + } + + public void testUniqueCountOfJobs() { + ForecastStats forecastStats = createForecastStats(5, 10); + ForecastStats forecastStats2 = createForecastStats(2, 8); + ForecastStats forecastStats3 = createForecastStats(0, 0); + ForecastStats forecastStats4 = createForecastStats(0, 0); + ForecastStats forecastStats5 = createForecastStats(1, 12); + + forecastStats.merge(forecastStats2); + forecastStats.merge(forecastStats3); + forecastStats.merge(forecastStats4); + forecastStats.merge(forecastStats5); + + assertEquals(3L, forecastStats.asMap().get(Fields.FORECASTED_JOBS)); + } + + @Override + public ForecastStats createTestInstance() { + return createForecastStats(1, 22); + } + + @Override + protected Reader instanceReader() { + return ForecastStats::new; + } + + public ForecastStats createForecastStats(long minTotal, long maxTotal) { + ForecastStats forecastStats = new ForecastStats(randomLongBetween(minTotal, maxTotal), createStatsAccumulator(), + createStatsAccumulator(), createStatsAccumulator(), createCountAccumulator()); + + return forecastStats; + } + + private StatsAccumulator createStatsAccumulator() { + return new StatsAccumulatorTests().createTestInstance(); + } + + private CountAccumulator createCountAccumulator() { + return new CountAccumulatorTests().createTestInstance(); + + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java new file mode 100644 index 0000000000000..bd2df0823ae17 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/stats/StatsAccumulatorTests.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.stats; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class StatsAccumulatorTests extends AbstractWireSerializingTestCase { + + public void testGivenNoValues() { + StatsAccumulator accumulator = new StatsAccumulator(); + assertThat(accumulator.getMin(), equalTo(0.0)); + assertThat(accumulator.getMax(), equalTo(0.0)); + assertThat(accumulator.getTotal(), equalTo(0.0)); + assertThat(accumulator.getAvg(), equalTo(0.0)); + } + + public void testGivenPositiveValues() { + StatsAccumulator accumulator = new StatsAccumulator(); + + for (int i = 1; i <= 10; i++) { + accumulator.add(i); + } + + assertThat(accumulator.getMin(), equalTo(1.0)); + assertThat(accumulator.getMax(), equalTo(10.0)); + assertThat(accumulator.getTotal(), equalTo(55.0)); + assertThat(accumulator.getAvg(), equalTo(5.5)); + } + + public void testGivenNegativeValues() { + StatsAccumulator accumulator = new StatsAccumulator(); + + for (int i = 1; i <= 10; i++) { + accumulator.add(-1 * i); + } + + assertThat(accumulator.getMin(), equalTo(-10.0)); + assertThat(accumulator.getMax(), equalTo(-1.0)); + assertThat(accumulator.getTotal(), equalTo(-55.0)); + assertThat(accumulator.getAvg(), equalTo(-5.5)); + } + + public void testAsMap() { + StatsAccumulator accumulator = new StatsAccumulator(); + accumulator.add(5.0); + accumulator.add(10.0); + + Map expectedMap = new HashMap<>(); + expectedMap.put("min", 5.0); + expectedMap.put("max", 10.0); + expectedMap.put("avg", 7.5); + expectedMap.put("total", 15.0); + assertThat(accumulator.asMap(), equalTo(expectedMap)); + } + + public void testMerge() { + StatsAccumulator accumulator = new StatsAccumulator(); + accumulator.add(5.0); + accumulator.add(10.0); + + assertThat(accumulator.getMin(), equalTo(5.0)); + assertThat(accumulator.getMax(), equalTo(10.0)); + assertThat(accumulator.getTotal(), equalTo(15.0)); + assertThat(accumulator.getAvg(), equalTo(7.5)); + + StatsAccumulator accumulator2 = new StatsAccumulator(); + accumulator2.add(1.0); + accumulator2.add(3.0); + accumulator2.add(7.0); + + assertThat(accumulator2.getMin(), equalTo(1.0)); + assertThat(accumulator2.getMax(), equalTo(7.0)); + assertThat(accumulator2.getTotal(), equalTo(11.0)); + assertThat(accumulator2.getAvg(), equalTo(11.0 / 3.0)); + + accumulator.merge(accumulator2); + assertThat(accumulator.getMin(), equalTo(1.0)); + assertThat(accumulator.getMax(), equalTo(10.0)); + assertThat(accumulator.getTotal(), equalTo(26.0)); + assertThat(accumulator.getAvg(), equalTo(5.2)); + + // same as accumulator + StatsAccumulator accumulator3 = new StatsAccumulator(); + accumulator3.add(5.0); + accumulator3.add(10.0); + + // merging the other way should yield the same results + accumulator2.merge(accumulator3); + assertThat(accumulator2.getMin(), equalTo(1.0)); + assertThat(accumulator2.getMax(), equalTo(10.0)); + assertThat(accumulator2.getTotal(), equalTo(26.0)); + assertThat(accumulator2.getAvg(), equalTo(5.2)); + } + + public void testMergeMixedEmpty() { + StatsAccumulator accumulator = new StatsAccumulator(); + + StatsAccumulator accumulator2 = new StatsAccumulator(); + accumulator2.add(1.0); + accumulator2.add(3.0); + accumulator.merge(accumulator2); + 
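As with ForecastStats, StatsAccumulator.merge is not shown in this diff. The mixed-empty cases exercised here require null-safe min/max handling; assuming the same count/total/min/max fields as the ml.utils version deleted later in this patch, a sketch consistent with these tests would be:

    // Sketch only: pool the running state of both accumulators. A null
    // min/max means the accumulator has seen no values yet, in which case
    // the other side's value (possibly also null) wins.
    public void merge(StatsAccumulator other) {
        count += other.count;
        total += other.total;
        min = min == null ? other.min : (other.min == null ? min : Math.min(min, other.min));
        max = max == null ? other.max : (other.max == null ? max : Math.max(max, other.max));
    }

This also reproduces testMerge above: total = 15 + 11 = 26 over 5 values gives avg 5.2.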
assertThat(accumulator.getMin(), equalTo(1.0)); + assertThat(accumulator.getMax(), equalTo(3.0)); + assertThat(accumulator.getTotal(), equalTo(4.0)); + + StatsAccumulator accumulator3 = new StatsAccumulator(); + accumulator.merge(accumulator3); + assertThat(accumulator.getMin(), equalTo(1.0)); + assertThat(accumulator.getMax(), equalTo(3.0)); + assertThat(accumulator.getTotal(), equalTo(4.0)); + + StatsAccumulator accumulator4 = new StatsAccumulator(); + accumulator3.merge(accumulator4); + + assertThat(accumulator3.getMin(), equalTo(0.0)); + assertThat(accumulator3.getMax(), equalTo(0.0)); + assertThat(accumulator3.getTotal(), equalTo(0.0)); + } + + public void testFromStatsAggregation() { + Stats stats = mock(Stats.class); + when(stats.getMax()).thenReturn(25.0); + when(stats.getMin()).thenReturn(2.5); + when(stats.getCount()).thenReturn(4L); + when(stats.getSum()).thenReturn(48.0); + when(stats.getAvg()).thenReturn(12.0); + + StatsAccumulator accumulator = StatsAccumulator.fromStatsAggregation(stats); + assertThat(accumulator.getMin(), equalTo(2.5)); + assertThat(accumulator.getMax(), equalTo(25.0)); + assertThat(accumulator.getTotal(), equalTo(48.0)); + assertThat(accumulator.getAvg(), equalTo(12.0)); + } + + @Override + public StatsAccumulator createTestInstance() { + StatsAccumulator accumulator = new StatsAccumulator(); + for (int i = 0; i < randomInt(10); ++i) { + accumulator.add(randomDoubleBetween(0.0, 1000.0, true)); + } + + return accumulator; + } + + @Override + protected Reader instanceReader() { + return StatsAccumulator::new; + } +} \ No newline at end of file diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index 05af1ffee17a4..14ac43fae101c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -33,7 +33,8 @@ import org.elasticsearch.xpack.ml.job.process.NativeController; import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; -import org.elasticsearch.xpack.ml.utils.StatsAccumulator; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; +import org.elasticsearch.xpack.core.ml.stats.StatsAccumulator; import java.io.IOException; import java.util.Arrays; @@ -192,10 +193,12 @@ public void execute(ActionListener listener) { private void addJobsUsage(GetJobsStatsAction.Response response) { StatsAccumulator allJobsDetectorsStats = new StatsAccumulator(); StatsAccumulator allJobsModelSizeStats = new StatsAccumulator(); + ForecastStats allJobsForecastStats = new ForecastStats(); Map jobCountByState = new HashMap<>(); Map detectorStatsByState = new HashMap<>(); Map modelSizeStatsByState = new HashMap<>(); + Map forecastStatsByState = new HashMap<>(); Map jobs = mlMetadata.getJobs(); List jobsStats = response.getResponse().results(); @@ -206,6 +209,7 @@ private void addJobsUsage(GetJobsStatsAction.Response response) { double modelSize = modelSizeStats == null ? 
0.0 : jobStats.getModelSizeStats().getModelBytes(); + allJobsForecastStats.merge(jobStats.getForecastStats()); allJobsDetectorsStats.add(detectorsCount); allJobsModelSizeStats.add(modelSize); @@ -215,24 +219,28 @@ private void addJobsUsage(GetJobsStatsAction.Response response) { js -> new StatsAccumulator()).add(detectorsCount); modelSizeStatsByState.computeIfAbsent(jobState, js -> new StatsAccumulator()).add(modelSize); + forecastStatsByState.merge(jobState, jobStats.getForecastStats(), (f1, f2) -> f1.merge(f2)); } jobsUsage.put(MachineLearningFeatureSetUsage.ALL, createJobUsageEntry(jobs.size(), allJobsDetectorsStats, - allJobsModelSizeStats)); + allJobsModelSizeStats, allJobsForecastStats)); for (JobState jobState : jobCountByState.keySet()) { jobsUsage.put(jobState.name().toLowerCase(Locale.ROOT), createJobUsageEntry( jobCountByState.get(jobState).get(), detectorStatsByState.get(jobState), - modelSizeStatsByState.get(jobState))); + modelSizeStatsByState.get(jobState), + forecastStatsByState.get(jobState))); } } private Map createJobUsageEntry(long count, StatsAccumulator detectorStats, - StatsAccumulator modelSizeStats) { + StatsAccumulator modelSizeStats, + ForecastStats forecastStats) { Map usage = new HashMap<>(); usage.put(MachineLearningFeatureSetUsage.COUNT, count); usage.put(MachineLearningFeatureSetUsage.DETECTORS, detectorStats.asMap()); usage.put(MachineLearningFeatureSetUsage.MODEL_SIZE, modelSizeStats.asMap()); + usage.put(MachineLearningFeatureSetUsage.FORECASTS, forecastStats.asMap()); return usage; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index 1182953dfc31e..31f918dfc2571 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.job.persistence.JobProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -106,9 +107,12 @@ protected void taskOperation(GetJobsStatsAction.Request request, TransportOpenJo JobState jobState = MlMetadata.getJobState(jobId, tasks); String assignmentExplanation = pTask.getAssignment().getExplanation(); TimeValue openTime = durationToTimeValue(processManager.jobOpenTime(task)); - GetJobsStatsAction.Response.JobStats jobStats = new GetJobsStatsAction.Response.JobStats(jobId, stats.get().v1(), - stats.get().v2(), jobState, node, assignmentExplanation, openTime); - listener.onResponse(new QueryPage<>(Collections.singletonList(jobStats), 1, Job.RESULTS_FIELD)); + gatherForecastStats(jobId, forecastStats -> { + GetJobsStatsAction.Response.JobStats jobStats = new GetJobsStatsAction.Response.JobStats(jobId, stats.get().v1(), + stats.get().v2(), forecastStats, jobState, node, assignmentExplanation, openTime); + listener.onResponse(new QueryPage<>(Collections.singletonList(jobStats), 1, Job.RESULTS_FIELD)); + }, listener::onFailure); + } else { listener.onResponse(new 
QueryPage<>(Collections.emptyList(), 0, Job.RESULTS_FIELD)); } @@ -131,25 +135,31 @@ void gatherStatsForClosedJobs(MlMetadata mlMetadata, GetJobsStatsAction.Request for (int i = 0; i < jobIds.size(); i++) { int slot = i; String jobId = jobIds.get(i); - gatherDataCountsAndModelSizeStats(jobId, (dataCounts, modelSizeStats) -> { - JobState jobState = MlMetadata.getJobState(jobId, tasks); - PersistentTasksCustomMetaData.PersistentTask pTask = MlMetadata.getJobTask(jobId, tasks); - String assignmentExplanation = null; - if (pTask != null) { - assignmentExplanation = pTask.getAssignment().getExplanation(); - } - jobStats.set(slot, new GetJobsStatsAction.Response.JobStats(jobId, dataCounts, modelSizeStats, jobState, null, - assignmentExplanation, null)); - if (counter.decrementAndGet() == 0) { - List results = response.getResponse().results(); - results.addAll(jobStats.asList()); - listener.onResponse(new GetJobsStatsAction.Response(response.getTaskFailures(), response.getNodeFailures(), - new QueryPage<>(results, results.size(), Job.RESULTS_FIELD))); - } + gatherForecastStats(jobId, forecastStats -> { + gatherDataCountsAndModelSizeStats(jobId, (dataCounts, modelSizeStats) -> { + JobState jobState = MlMetadata.getJobState(jobId, tasks); + PersistentTasksCustomMetaData.PersistentTask pTask = MlMetadata.getJobTask(jobId, tasks); + String assignmentExplanation = null; + if (pTask != null) { + assignmentExplanation = pTask.getAssignment().getExplanation(); + } + jobStats.set(slot, new GetJobsStatsAction.Response.JobStats(jobId, dataCounts, modelSizeStats, forecastStats, jobState, + null, assignmentExplanation, null)); + if (counter.decrementAndGet() == 0) { + List results = response.getResponse().results(); + results.addAll(jobStats.asList()); + listener.onResponse(new GetJobsStatsAction.Response(response.getTaskFailures(), response.getNodeFailures(), + new QueryPage<>(results, results.size(), Job.RESULTS_FIELD))); + } + }, listener::onFailure); }, listener::onFailure); } } + void gatherForecastStats(String jobId, Consumer handler, Consumer errorHandler) { + jobProvider.getForecastStats(jobId, handler, errorHandler); + } + void gatherDataCountsAndModelSizeStats(String jobId, BiConsumer handler, Consumer errorHandler) { jobProvider.dataCounts(jobId, dataCounts -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java index 578ddd1efc78a..7513cb5a5bbc0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobProvider.java @@ -63,6 +63,9 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.metrics.stats.Stats; import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; @@ -93,6 +96,9 @@ import org.elasticsearch.xpack.core.ml.job.results.Influencer; import org.elasticsearch.xpack.core.ml.job.results.ModelPlot; import org.elasticsearch.xpack.core.ml.job.results.Result; +import 
org.elasticsearch.xpack.core.ml.stats.CountAccumulator; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; +import org.elasticsearch.xpack.core.ml.stats.StatsAccumulator; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlIndicesUtils; import org.elasticsearch.xpack.core.security.support.Exceptions; @@ -1112,6 +1118,53 @@ public void getForecastRequestStats(String jobId, String forecastId, Consumer handler.accept(result.result), errorHandler, () -> null); } + public void getForecastStats(String jobId, Consumer handler, Consumer errorHandler) { + String indexName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); + + QueryBuilder termQuery = new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), ForecastRequestStats.RESULT_TYPE_VALUE); + QueryBuilder jobQuery = new TermsQueryBuilder(Job.ID.getPreferredName(), jobId); + QueryBuilder finalQuery = new BoolQueryBuilder().filter(termQuery).filter(jobQuery); + + SearchRequest searchRequest = new SearchRequest(indexName); + searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(searchRequest.indicesOptions())); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(finalQuery); + sourceBuilder.aggregation( + AggregationBuilders.stats(ForecastStats.Fields.MEMORY).field(ForecastRequestStats.MEMORY_USAGE.getPreferredName())); + sourceBuilder.aggregation(AggregationBuilders.stats(ForecastStats.Fields.RECORDS) + .field(ForecastRequestStats.PROCESSED_RECORD_COUNT.getPreferredName())); + sourceBuilder.aggregation( + AggregationBuilders.stats(ForecastStats.Fields.RUNTIME).field(ForecastRequestStats.PROCESSING_TIME_MS.getPreferredName())); + sourceBuilder.aggregation( + AggregationBuilders.terms(ForecastStats.Fields.STATUSES).field(ForecastRequestStats.STATUS.getPreferredName())); + sourceBuilder.size(0); + + searchRequest.source(sourceBuilder); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, + ActionListener.wrap(searchResponse -> { + long totalHits = searchResponse.getHits().getTotalHits(); + Aggregations aggregations = searchResponse.getAggregations(); + if (totalHits == 0 || aggregations == null) { + handler.accept(new ForecastStats()); + return; + } + Map aggregationsAsMap = aggregations.asMap(); + StatsAccumulator memoryStats = StatsAccumulator + .fromStatsAggregation((Stats) aggregationsAsMap.get(ForecastStats.Fields.MEMORY)); + StatsAccumulator recordStats = StatsAccumulator + .fromStatsAggregation((Stats) aggregationsAsMap.get(ForecastStats.Fields.RECORDS)); + StatsAccumulator runtimeStats = StatsAccumulator + .fromStatsAggregation((Stats) aggregationsAsMap.get(ForecastStats.Fields.RUNTIME)); + CountAccumulator statusCount = CountAccumulator + .fromTermsAggregation((StringTerms) aggregationsAsMap.get(ForecastStats.Fields.STATUSES)); + + ForecastStats forecastStats = new ForecastStats(totalHits, memoryStats, recordStats, runtimeStats, statusCount); + handler.accept(forecastStats); + }, errorHandler), client::search); + + } + public void updateCalendar(String calendarId, Set jobIdsToAdd, Set jobIdsToRemove, Consumer handler, Consumer errorHandler) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/StatsAccumulator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/StatsAccumulator.java deleted file mode 100644 index 1f1df147d80a1..0000000000000 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/StatsAccumulator.java +++ 
/dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.utils; - -import java.util.HashMap; -import java.util.Map; - -/** - * Helper class to collect min, max, avg and total statistics for a quantity - */ -public class StatsAccumulator { - - private static final String MIN = "min"; - private static final String MAX = "max"; - private static final String AVG = "avg"; - private static final String TOTAL = "total"; - - private long count; - private double total; - private Double min; - private Double max; - - public void add(double value) { - count++; - total += value; - min = min == null ? value : (value < min ? value : min); - max = max == null ? value : (value > max ? value : max); - } - - public double getMin() { - return min == null ? 0.0 : min; - } - - public double getMax() { - return max == null ? 0.0 : max; - } - - public double getAvg() { - return count == 0.0 ? 0.0 : total/count; - } - - public double getTotal() { - return total; - } - - public Map asMap() { - Map map = new HashMap<>(); - map.put(MIN, getMin()); - map.put(MAX, getMax()); - map.put(AVG, getAvg()); - map.put(TOTAL, getTotal()); - return map; - } -} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java index eba2054054c0d..5893a863fe38f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java @@ -39,6 +39,8 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStatsTests; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.junit.Before; @@ -138,11 +140,11 @@ public void testUsage() throws Exception { settings.put("xpack.ml.enabled", true); Job opened1 = buildJob("opened1", Arrays.asList(buildMinDetector("foo"))); - GetJobsStatsAction.Response.JobStats opened1JobStats = buildJobStats("opened1", JobState.OPENED, 100L); + GetJobsStatsAction.Response.JobStats opened1JobStats = buildJobStats("opened1", JobState.OPENED, 100L, 3L); Job opened2 = buildJob("opened2", Arrays.asList(buildMinDetector("foo"), buildMinDetector("bar"))); - GetJobsStatsAction.Response.JobStats opened2JobStats = buildJobStats("opened2", JobState.OPENED, 200L); + GetJobsStatsAction.Response.JobStats opened2JobStats = buildJobStats("opened2", JobState.OPENED, 200L, 8L); Job closed1 = buildJob("closed1", Arrays.asList(buildMinDetector("foo"), buildMinDetector("bar"), buildMinDetector("foobar"))); - GetJobsStatsAction.Response.JobStats closed1JobStats = buildJobStats("closed1", JobState.CLOSED, 300L); + GetJobsStatsAction.Response.JobStats closed1JobStats = buildJobStats("closed1", JobState.CLOSED, 300L, 0); givenJobs(Arrays.asList(opened1, opened2, closed1), Arrays.asList(opened1JobStats, opened2JobStats, closed1JobStats)); @@ -210,6 +212,15 @@ public void testUsage() throws Exception { 
assertThat(source.getValue("datafeeds._all.count"), equalTo(3)); assertThat(source.getValue("datafeeds.started.count"), equalTo(2)); assertThat(source.getValue("datafeeds.stopped.count"), equalTo(1)); + + assertThat(source.getValue("jobs._all.forecasts.total"), equalTo(11)); + assertThat(source.getValue("jobs._all.forecasts.forecasted_jobs"), equalTo(2)); + + assertThat(source.getValue("jobs.closed.forecasts.total"), equalTo(0)); + assertThat(source.getValue("jobs.closed.forecasts.forecasted_jobs"), equalTo(0)); + + assertThat(source.getValue("jobs.opened.forecasts.total"), equalTo(11)); + assertThat(source.getValue("jobs.opened.forecasts.forecasted_jobs"), equalTo(2)); } } @@ -301,12 +312,16 @@ private static Job buildJob(String jobId, List detectors) { .build(new Date(randomNonNegativeLong())); } - private static GetJobsStatsAction.Response.JobStats buildJobStats(String jobId, JobState state, long modelBytes) { + private static GetJobsStatsAction.Response.JobStats buildJobStats(String jobId, JobState state, long modelBytes, + long numberOfForecasts) { ModelSizeStats.Builder modelSizeStats = new ModelSizeStats.Builder(jobId); modelSizeStats.setModelBytes(modelBytes); GetJobsStatsAction.Response.JobStats jobStats = mock(GetJobsStatsAction.Response.JobStats.class); + ForecastStats forecastStats = buildForecastStats(numberOfForecasts); + when(jobStats.getJobId()).thenReturn(jobId); when(jobStats.getModelSizeStats()).thenReturn(modelSizeStats.build()); + when(jobStats.getForecastStats()).thenReturn(forecastStats); when(jobStats.getState()).thenReturn(state); return jobStats; } @@ -316,4 +331,8 @@ private static GetDatafeedsStatsAction.Response.DatafeedStats buildDatafeedStats when(stats.getDatafeedState()).thenReturn(state); return stats; } + + private static ForecastStats buildForecastStats(long numberOfForecasts) { + return new ForecastStatsTests().createForecastStats(numberOfForecasts, numberOfForecasts); + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java index 40bc82c6048c7..2e00ad71251db 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java @@ -37,7 +37,7 @@ public void testDetermineJobIds() { result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, Collections.singletonList("id1"), Collections.singletonList( - new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, JobState.OPENED, null, null, null))); + new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.OPENED, null, null, null))); assertEquals(0, result.size()); result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, @@ -49,7 +49,7 @@ public void testDetermineJobIds() { result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, Arrays.asList("id1", "id2", "id3"), - Collections.singletonList(new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, + Collections.singletonList(new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.CLOSED, null, null, null)) ); assertEquals(2, result.size()); @@ -58,17 +58,16 @@ public void testDetermineJobIds() { result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, Arrays.asList("id1", "id2", "id3"), Arrays.asList( - new 
GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, JobState.OPENED, null, null, null), - new GetJobsStatsAction.Response.JobStats("id3", new DataCounts("id3"), null, JobState.OPENED, null, null, null) + new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.OPENED, null, null, null), + new GetJobsStatsAction.Response.JobStats("id3", new DataCounts("id3"), null, null, JobState.OPENED, null, null, null) )); assertEquals(1, result.size()); assertEquals("id2", result.get(0)); - result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, Arrays.asList("id1", "id2", "id3"), - Arrays.asList( - new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, JobState.OPENED, null, null, null), - new GetJobsStatsAction.Response.JobStats("id2", new DataCounts("id2"), null, JobState.OPENED, null, null, null), - new GetJobsStatsAction.Response.JobStats("id3", new DataCounts("id3"), null, JobState.OPENED, null, null, null))); + result = determineNonDeletedJobIdsWithoutLiveStats(mlMetadata, Arrays.asList("id1", "id2", "id3"), Arrays.asList( + new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, JobState.OPENED, null, null, null), + new GetJobsStatsAction.Response.JobStats("id2", new DataCounts("id2"), null, null, JobState.OPENED, null, null, null), + new GetJobsStatsAction.Response.JobStats("id3", new DataCounts("id3"), null, null, JobState.OPENED, null, null, null))); assertEquals(0, result.size()); // No jobs running, but job 4 is being deleted diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/StatsAccumulatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/StatsAccumulatorTests.java deleted file mode 100644 index ae9b6a7360c13..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/StatsAccumulatorTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.ml.utils; - -import org.elasticsearch.test.ESTestCase; - -import java.util.HashMap; -import java.util.Map; - -import static org.hamcrest.Matchers.equalTo; - -public class StatsAccumulatorTests extends ESTestCase { - - public void testGivenNoValues() { - StatsAccumulator accumulator = new StatsAccumulator(); - assertThat(accumulator.getMin(), equalTo(0.0)); - assertThat(accumulator.getMax(), equalTo(0.0)); - assertThat(accumulator.getTotal(), equalTo(0.0)); - assertThat(accumulator.getAvg(), equalTo(0.0)); - } - - public void testGivenPositiveValues() { - StatsAccumulator accumulator = new StatsAccumulator(); - - for (int i = 1; i <= 10; i++) { - accumulator.add(i); - } - - assertThat(accumulator.getMin(), equalTo(1.0)); - assertThat(accumulator.getMax(), equalTo(10.0)); - assertThat(accumulator.getTotal(), equalTo(55.0)); - assertThat(accumulator.getAvg(), equalTo(5.5)); - } - - public void testGivenNegativeValues() { - StatsAccumulator accumulator = new StatsAccumulator(); - - for (int i = 1; i <= 10; i++) { - accumulator.add(-1 * i); - } - - assertThat(accumulator.getMin(), equalTo(-10.0)); - assertThat(accumulator.getMax(), equalTo(-1.0)); - assertThat(accumulator.getTotal(), equalTo(-55.0)); - assertThat(accumulator.getAvg(), equalTo(-5.5)); - } - - public void testAsMap() { - StatsAccumulator accumulator = new StatsAccumulator(); - accumulator.add(5.0); - accumulator.add(10.0); - - Map expectedMap = new HashMap<>(); - expectedMap.put("min", 5.0); - expectedMap.put("max", 10.0); - expectedMap.put("avg", 7.5); - expectedMap.put("total", 15.0); - assertThat(accumulator.asMap(), equalTo(expectedMap)); - } -} \ No newline at end of file diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java index 88f34c4577c1c..9d37073a426cc 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsMonitoringDocTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.stats.ForecastStats; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.exporter.BaseMonitoringDocTestCase; @@ -100,7 +101,9 @@ public void testToXContent() throws IOException { .build(); final DataCounts dataCounts = new DataCounts("_job_id", 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, date3, date4, date5, date6, date7); - final JobStats jobStats = new JobStats("_job", dataCounts, modelStats, JobState.OPENED, discoveryNode, "_explanation", time); + final ForecastStats forecastStats = new ForecastStats(); + final JobStats jobStats = new JobStats("_job", dataCounts, modelStats, forecastStats, JobState.OPENED, discoveryNode, + "_explanation", time); final MonitoringDoc.Node node = new MonitoringDoc.Node("_uuid", "_host", "_addr", "_ip", "_name", 1504169190855L); final JobStatsMonitoringDoc document = new JobStatsMonitoringDoc("_cluster", 1502266739402L, 1506593717631L, node, jobStats); @@ 
-152,6 +155,9 @@ public void testToXContent() throws IOException { + "\"log_time\":1483315322002," + "\"timestamp\":1483228861001" + "}," + + "\"forecasts_stats\":{" + + "\"total\":0,\"forecasted_jobs\":0" + + "}," + "\"state\":\"opened\"," + "\"node\":{" + "\"id\":\"_node_id\"," From 896317fe36d8dbc236a837fb07704fb8e26dcd4d Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 4 Jul 2018 10:25:00 +0100 Subject: [PATCH 32/36] [ML] Limit ML filter items to 10K (#31731) Add a hard limit to the number of items a filter may have. This serves to protect against excessive overhead due to filters taking too much memory or lookups becoming too expensive. --- .../xpack/core/ml/job/config/MlFilter.java | 15 ++++++++++- .../xpack/core/ml/job/messages/Messages.java | 1 + .../core/ml/job/config/MlFilterTests.java | 27 +++++++++++++++++-- 3 files changed, 40 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index c55ba401a2f0a..48051fa4733ff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -29,6 +29,15 @@ public class MlFilter implements ToXContentObject, Writeable { + /** + * The max number of items allowed per filter. + * Limiting the number of items protects users + * from running into excessive overhead due to + * filters using too much memory and lookups + * becoming too expensive. + */ + private static final int MAX_ITEMS = 10000; + public static final String DOCUMENT_ID_PREFIX = "filter_"; public static final String FILTER_TYPE = "filter"; @@ -62,7 +71,7 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private MlFilter(String id, String description, SortedSet items) { this.id = Objects.requireNonNull(id); this.description = description; - this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); + this.items = Objects.requireNonNull(items); } public MlFilter(StreamInput in) throws IOException { @@ -182,9 +191,13 @@ public Builder setItems(String...
items) { public MlFilter build() { ExceptionsHelper.requireNonNull(id, MlFilter.ID.getPreferredName()); + ExceptionsHelper.requireNonNull(items, MlFilter.ITEMS.getPreferredName()); if (!MlStrings.isValidId(id)) { throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.INVALID_ID, ID.getPreferredName(), id)); } + if (items.size() > MAX_ITEMS) { + throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.FILTER_CONTAINS_TOO_MANY_ITEMS, id, MAX_ITEMS)); + } return new MlFilter(id, description, items); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index f0329051fed95..259d2d06a9c6e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -43,6 +43,7 @@ public final class Messages { "Datafeed frequency [{0}] must be a multiple of the aggregation interval [{1}]"; public static final String FILTER_NOT_FOUND = "No filter with id [{0}] exists"; + public static final String FILTER_CONTAINS_TOO_MANY_ITEMS = "Filter [{0}] contains too many items; up to [{1}] items are allowed"; public static final String INCONSISTENT_ID = "Inconsistent {0}; ''{1}'' specified in the body differs from ''{2}'' specified as a URL argument"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java index a89250330f046..45ba47281a2a1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java @@ -13,6 +13,8 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.SortedSet; import java.util.TreeSet; @@ -71,9 +73,9 @@ public void testNullId() { } public void testNullItems() { - NullPointerException ex = expectThrows(NullPointerException.class, + Exception ex = expectThrows(IllegalArgumentException.class, () -> MlFilter.builder(randomValidFilterId()).setItems((SortedSet) null).build()); - assertEquals(MlFilter.ITEMS.getPreferredName() + " must not be null", ex.getMessage()); + assertEquals("[items] must not be null.", ex.getMessage()); } public void testDocumentId() { @@ -102,6 +104,27 @@ public void testInvalidId() { assertThat(e.getMessage(), startsWith("Invalid filter_id; 'Invalid id' can contain lowercase")); } + public void testTooManyItems() { + List items = new ArrayList<>(10001); + for (int i = 0; i < 10001; ++i) { + items.add("item_" + i); + } + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> MlFilter.builder("huge").setItems(items).build()); + assertThat(e.getMessage(), startsWith("Filter [huge] contains too many items")); + } + + public void testGivenItemsAreMaxAllowed() { + List items = new ArrayList<>(10000); + for (int i = 0; i < 10000; ++i) { + items.add("item_" + i); + } + + MlFilter hugeFilter = MlFilter.builder("huge").setItems(items).build(); + + assertThat(hugeFilter.getItems().size(), equalTo(items.size())); + } + public void testItemsAreSorted() { MlFilter filter = MlFilter.builder("foo").setItems("c", "b", "a").build(); 
assertThat(filter.getItems(), contains("a", "b", "c")); From 3f2a241b7f069b2a4c274dcc0b9b7903a5634933 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 4 Jul 2018 11:32:35 +0200 Subject: [PATCH 33/36] Detach Transport from TransportService (#31727) Today Transport is tightly coupled with TransportService since it requires an instance of TransportService in order to receive responses and send requests. This is mainly due to the Request and Response handlers being maintained in TransportService but also because of the lack of a proper callback interface. This change moves the request handler registry and response handler registration into Transport and adds all necessary methods to `TransportConnectionListener` in order to remove the `TransportService` dependency from `Transport`. Transport now accepts one or more `TransportConnectionListener` instances that are executed sequentially in a blocking fashion. --- .../netty4/Netty4ScheduledPingTests.java | 4 +- .../discovery/zen/FaultDetection.java | 18 +- .../elasticsearch/transport/TcpTransport.java | 137 +++++-- .../elasticsearch/transport/Transport.java | 150 +++++++- .../TransportConnectionListener.java | 65 +++- .../transport/TransportService.java | 357 ++++++++---------- .../node/tasks/CancellableTasksTests.java | 2 +- .../node/tasks/TransportTasksActionTests.java | 28 +- .../action/main/MainActionTests.java | 5 +- .../search/MultiSearchActionTookTests.java | 9 +- .../TransportMultiSearchActionTests.java | 6 +- .../TransportMasterNodeActionTests.java | 26 +- .../BroadcastReplicationTests.java | 2 +- .../TransportReplicationActionTests.java | 42 ++- .../TransportWriteActionTests.java | 11 +- .../transport/FailAndRetryMockTransport.java | 52 ++- .../cluster/NodeConnectionsServiceTests.java | 45 ++- .../indices/cluster/ClusterStateChanges.java | 2 +- ...ClusterStateServiceRandomUpdatesTests.java | 3 +- .../transport/ActionNamesIT.java | 54 --- .../transport/TransportActionProxyTests.java | 28 +- .../elasticsearch/test/ESIntegTestCase.java | 26 ++ .../test/transport/CapturingTransport.java | 62 ++- .../test/transport/MockTransportService.java | 42 ++- .../AbstractSimpleTransportTestCase.java | 165 ++++---- .../action/TransportXPackInfoActionTests.java | 5 +- .../SecurityServerTransportServiceTests.java | 18 +- .../role/TransportDeleteRoleActionTests.java | 13 +- .../role/TransportGetRolesActionTests.java | 17 +- .../role/TransportPutRoleActionTests.java | 13 +- .../TransportGetRoleMappingsActionTests.java | 5 +- .../TransportPutRoleMappingActionTests.java | 5 +- ...sportSamlInvalidateSessionActionTests.java | 5 +- .../saml/TransportSamlLogoutActionTests.java | 3 +- .../TransportAuthenticateActionTests.java | 13 +- .../TransportChangePasswordActionTests.java | 18 +- .../user/TransportDeleteUserActionTests.java | 21 +- .../user/TransportGetUsersActionTests.java | 17 +- .../TransportHasPrivilegesActionTests.java | 8 +- .../user/TransportPutUserActionTests.java | 21 +- .../user/TransportSetEnabledActionTests.java | 21 +- 41 files changed, 920 insertions(+), 624 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/transport/ActionNamesIT.java diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java index bd62ff0af0b5a..01c5f5b617077 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java +++
b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java @@ -89,7 +89,7 @@ public void testScheduledPing() throws Exception { assertThat(nettyA.getPing().getFailedPings(), equalTo(0L)); assertThat(nettyB.getPing().getFailedPings(), equalTo(0L)); - serviceA.registerRequestHandler("sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(TransportRequest.Empty request, TransportChannel channel, Task task) { @@ -104,7 +104,7 @@ public void messageReceived(TransportRequest.Empty request, TransportChannel cha int rounds = scaledRandomIntBetween(100, 5000); for (int i = 0; i < rounds; i++) { - serviceB.submitRequest(nodeA, "sayHello", + serviceB.submitRequest(nodeA, "internal:sayHello", TransportRequest.Empty.INSTANCE, TransportRequestOptions.builder().withCompress(randomBoolean()).build(), new TransportResponseHandler() { @Override diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java index 715e8be03efb2..5d9b1687e4295 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportService; @@ -93,13 +94,20 @@ public void close() { abstract void handleTransportDisconnect(DiscoveryNode node); private class FDConnectionListener implements TransportConnectionListener { - @Override - public void onNodeConnected(DiscoveryNode node) { - } - @Override public void onNodeDisconnected(DiscoveryNode node) { - handleTransportDisconnect(node); + AbstractRunnable runnable = new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn("failed to handle transport disconnect for node: {}", node); + } + + @Override + protected void doRun() { + handleTransportDisconnect(node); + } + }; + threadPool.generic().execute(runnable); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index c8f256c2db89a..b2294ce589325 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.compress.Compressor; @@ -98,10 +99,10 @@ import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -205,7 +206,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements protected final NetworkService networkService; protected final Set profileSettings; - private volatile TransportService transportService; + private final DelegatingTransportConnectionListener transportListener = new DelegatingTransportConnectionListener(); private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); // node id to actual channel @@ -225,12 +226,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements protected final ConnectionProfile defaultConnectionProfile; private final ConcurrentMap pendingHandshakes = new ConcurrentHashMap<>(); - private final AtomicLong requestIdGenerator = new AtomicLong(); private final CounterMetric numHandshakes = new CounterMetric(); private static final String HANDSHAKE_ACTION_NAME = "internal:tcp/handshake"; private final MeanMetric readBytesMetric = new MeanMetric(); private final MeanMetric transmittedBytesMetric = new MeanMetric(); + private volatile Map requestHandlers = Collections.emptyMap(); + private final ResponseHandlers responseHandlers = new ResponseHandlers(); public TcpTransport(String transportName, Settings settings, ThreadPool threadPool, BigArrays bigArrays, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, @@ -287,6 +289,16 @@ protected void doStart() { } } + @Override + public void addConnectionListener(TransportConnectionListener listener) { + transportListener.listeners.add(listener); + } + + @Override + public boolean removeConnectionListener(TransportConnectionListener listener) { + return transportListener.listeners.remove(listener); + } + @Override public CircuitBreaker getInFlightRequestBreaker() { // We always obtain a fresh breaker to reflect changes to the breaker configuration. 
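With listener registration moved onto the transport, callers hook connection events directly rather than going through TransportService. An illustrative sketch; the no-op default methods it relies on are implied by FDConnectionListener above now overriding only the single method it needs:

    // Sketch only: subscribe to disconnect events straight on the transport.
    transport.addConnectionListener(new TransportConnectionListener() {
        @Override
        public void onNodeDisconnected(DiscoveryNode node) {
            logger.debug("node [{}] disconnected", node);
        }
    });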
@@ -294,11 +306,11 @@ public CircuitBreaker getInFlightRequestBreaker() { } @Override - public void setTransportService(TransportService service) { - if (service.getRequestHandler(HANDSHAKE_ACTION_NAME) != null) { - throw new IllegalStateException(HANDSHAKE_ACTION_NAME + " is a reserved request handler and must not be registered"); + public synchronized void registerRequestHandler(RequestHandlerRegistry reg) { + if (requestHandlers.containsKey(reg.getAction())) { + throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); } - this.transportService = service; + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); } private static class HandshakeResponseHandler implements TransportResponseHandler { @@ -482,7 +494,7 @@ public void close() { boolean block = lifecycle.stopped() && Transports.isTransportThread(Thread.currentThread()) == false; CloseableChannel.closeChannels(channels, block); } finally { - transportService.onConnectionClosed(this); + transportListener.onConnectionClosed(this); } } } @@ -538,7 +550,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil logger.debug("connected to node [{}]", node); } try { - transportService.onNodeConnected(node); + transportListener.onNodeConnected(node); } finally { if (nodeChannels.isClosed()) { // we got closed concurrently due to a disconnect or some other event on the channel. @@ -550,7 +562,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil // try to remove it first either way one of the two wins even if the callback has run before we even added the // tuple to the map since in that case we remove it here again if (connectedNodes.remove(node, nodeChannels)) { - transportService.onNodeDisconnected(node); + transportListener.onNodeDisconnected(node); } throw new NodeNotConnectedException(node, "connection concurrently closed"); } @@ -652,7 +664,7 @@ public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile c // At this point we should construct the connection, notify the transport service, and attach close listeners to the // underlying channels. 
nodeChannels = new NodeChannels(node, channels, connectionProfile, version); - transportService.onConnectionOpened(nodeChannels); + transportListener.onConnectionOpened(nodeChannels); final NodeChannels finalNodeChannels = nodeChannels; final AtomicBoolean runOnce = new AtomicBoolean(false); Consumer onClose = c -> { @@ -695,7 +707,7 @@ private void disconnectFromNodeCloseAndNotify(DiscoveryNode node, NodeChannels n if (closeLock.readLock().tryLock()) { try { if (connectedNodes.remove(node, nodeChannels)) { - transportService.onNodeDisconnected(node); + transportListener.onNodeDisconnected(node); } } finally { closeLock.readLock().unlock(); @@ -722,7 +734,7 @@ public void disconnectFromNode(DiscoveryNode node) { } finally { closeLock.readLock().unlock(); if (nodeChannels != null) { // if we found it and removed it we close and notify - IOUtils.closeWhileHandlingException(nodeChannels, () -> transportService.onNodeDisconnected(node)); + IOUtils.closeWhileHandlingException(nodeChannels, () -> transportListener.onNodeDisconnected(node)); } } } @@ -979,7 +991,7 @@ protected final void doStop() { Map.Entry next = iterator.next(); try { IOUtils.closeWhileHandlingException(next.getValue()); - transportService.onNodeDisconnected(next.getKey()); + transportListener.onNodeDisconnected(next.getKey()); } finally { iterator.remove(); } @@ -1133,7 +1145,7 @@ private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel cha final TransportRequestOptions finalOptions = options; // this might be called in a different thread SendListener onRequestSent = new SendListener(channel, stream, - () -> transportService.onRequestSent(node, requestId, action, request, finalOptions), message.length()); + () -> transportListener.onRequestSent(node, requestId, action, request, finalOptions), message.length()); internalSendMessage(channel, message, onRequestSent); addedReleaseListener = true; } finally { @@ -1187,7 +1199,7 @@ public void sendErrorResponse( final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); CompositeBytesReference message = new CompositeBytesReference(header, bytes); SendListener onResponseSent = new SendListener(channel, null, - () -> transportService.onResponseSent(requestId, action, error), message.length()); + () -> transportListener.onResponseSent(requestId, action, error), message.length()); internalSendMessage(channel, message, onResponseSent); } } @@ -1236,7 +1248,7 @@ private void sendResponse( final TransportResponseOptions finalOptions = options; // this might be called in a different thread SendListener listener = new SendListener(channel, stream, - () -> transportService.onResponseSent(requestId, action, response, finalOptions), message.length()); + () -> transportListener.onResponseSent(requestId, action, response, finalOptions), message.length()); internalSendMessage(channel, message, listener); addedReleaseListener = true; } finally { @@ -1492,7 +1504,7 @@ public final void messageReceived(BytesReference reference, TcpChannel channel) if (isHandshake) { handler = pendingHandshakes.remove(requestId); } else { - TransportResponseHandler theHandler = transportService.onResponseReceived(requestId); + TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, transportListener); if (theHandler == null && TransportStatus.isError(status)) { handler = pendingHandshakes.remove(requestId); } else { @@ -1599,7 +1611,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str features = 
Collections.emptySet(); } final String action = stream.readString(); - transportService.onRequestReceived(requestId, action); + transportListener.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { if (TransportStatus.isHandshake(status)) { @@ -1607,7 +1619,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str sendResponse(version, features, channel, response, requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, TransportStatus.setHandshake((byte) 0)); } else { - final RequestHandlerRegistry reg = transportService.getRequestHandler(action); + final RequestHandlerRegistry reg = getRequestHandler(action); if (reg == null) { throw new ActionNotFoundTransportException(action); } @@ -1714,7 +1726,7 @@ public void writeTo(StreamOutput out) throws IOException { protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException, InterruptedException { numHandshakes.inc(); - final long requestId = newRequestId(); + final long requestId = responseHandlers.newRequestId(); final HandshakeResponseHandler handler = new HandshakeResponseHandler(channel); AtomicReference versionRef = handler.versionRef; AtomicReference exceptionRef = handler.exceptionRef; @@ -1764,11 +1776,6 @@ final long getNumHandshakes() { return numHandshakes.count(); // for testing } - @Override - public long newRequestId() { - return requestIdGenerator.incrementAndGet(); - } - /** * Called once the channel is closed for instance due to a disconnect or a closed socket etc. */ @@ -1912,4 +1919,82 @@ public ProfileSettings(Settings settings, String profileName) { PUBLISH_PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); } } + + private static final class DelegatingTransportConnectionListener implements TransportConnectionListener { + private final List listeners = new CopyOnWriteArrayList<>(); + + @Override + public void onRequestReceived(long requestId, String action) { + for (TransportConnectionListener listener : listeners) { + listener.onRequestReceived(requestId, action); + } + } + + @Override + public void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) { + for (TransportConnectionListener listener : listeners) { + listener.onResponseSent(requestId, action, response, finalOptions); + } + } + + @Override + public void onResponseSent(long requestId, String action, Exception error) { + for (TransportConnectionListener listener : listeners) { + listener.onResponseSent(requestId, action, error); + } + } + + @Override + public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions finalOptions) { + for (TransportConnectionListener listener : listeners) { + listener.onRequestSent(node, requestId, action, request, finalOptions); + } + } + + @Override + public void onNodeDisconnected(DiscoveryNode key) { + for (TransportConnectionListener listener : listeners) { + listener.onNodeDisconnected(key); + } + } + + @Override + public void onConnectionOpened(Connection nodeChannels) { + for (TransportConnectionListener listener : listeners) { + listener.onConnectionOpened(nodeChannels); + } + } + + @Override + public void onNodeConnected(DiscoveryNode node) { + for (TransportConnectionListener listener : listeners) { + listener.onNodeConnected(node); + } + } + + @Override + public void onConnectionClosed(Connection nodeChannels) { + for (TransportConnectionListener 
listener : listeners) { + listener.onConnectionClosed(nodeChannels); + } + } + + @Override + public void onResponseReceived(long requestId, ResponseContext holder) { + for (TransportConnectionListener listener : listeners) { + listener.onResponseReceived(requestId, holder); + } + } + } + + @Override + public final ResponseHandlers getResponseHandlers() { + return responseHandlers; + } + + @Override + public final RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); + } } diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index 6ef698f1740b3..74235479657bf 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -29,18 +29,45 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import java.io.Closeable; import java.io.IOException; import java.net.UnknownHostException; +import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Predicate; public interface Transport extends LifecycleComponent { Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, Property.NodeScope); - void setTransportService(TransportService service); + /** + * Registers a new request handler + */ + void registerRequestHandler(RequestHandlerRegistry reg); + + /** + * Returns the registered request handler registry for the given action or null if it's not registered + * @param action the action to look up + */ + RequestHandlerRegistry getRequestHandler(String action); + + /** + * Adds a new event listener + * @param listener the listener to add + */ + void addConnectionListener(TransportConnectionListener listener); + + /** + * Removes an event listener + * @param listener the listener to remove + * @return true iff the listener was removed, otherwise false + */ + boolean removeConnectionListener(TransportConnectionListener listener); /** * The address the transport is bound on. @@ -75,17 +102,15 @@ void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfile, */ void disconnectFromNode(DiscoveryNode node); + /** + * Returns a list of all local addresses for this transport + */ List getLocalAddresses(); default CircuitBreaker getInFlightRequestBreaker() { return new NoopCircuitBreaker("in-flight-noop"); } - /** - * Returns a new request ID to use when sending a message via {@link Connection#sendRequest(long, String, - * TransportRequest, TransportRequestOptions)} - */ - long newRequestId(); /** * Returns a connection for the given node if the node is connected. * Connections returned from this method must not be closed.
The lifecycle of this connection is maintained by the Transport @@ -107,6 +132,8 @@ default CircuitBreaker getInFlightRequestBreaker() { TransportStats getStats(); + ResponseHandlers getResponseHandlers(); + /** * A unidirectional connection to a {@link DiscoveryNode} */ @@ -118,6 +145,10 @@ interface Connection extends Closeable { /** * Sends the request to the node this connection is associated with + * @param requestId see {@link ResponseHandlers#add(ResponseContext)} for details + * @param action the action to execute + * @param request the request to send + * @param options request options to apply * @throws NodeNotConnectedException if the given node is not connected */ void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws @@ -138,4 +169,111 @@ default Object getCacheKey() { return this; } } + + /** + * This class represents a response context that encapsulates the actual response handler, the action and the connection it was + * executed on. + */ + final class ResponseContext { + + private final TransportResponseHandler handler; + + private final Connection connection; + + private final String action; + + ResponseContext(TransportResponseHandler handler, Connection connection, String action) { + this.handler = handler; + this.connection = connection; + this.action = action; + } + + public TransportResponseHandler handler() { + return handler; + } + + public Connection connection() { + return this.connection; + } + + public String action() { + return this.action; + } + } + + /** + * This class is a registry that keeps track of in-flight {@link ResponseContext} instances, keyed by request ID. + */ + final class ResponseHandlers { + private final ConcurrentMapLong handlers = ConcurrentCollections + .newConcurrentMapLongWithAggressiveConcurrency(); + private final AtomicLong requestIdGenerator = new AtomicLong(); + + /** + * Returns true if the given request ID has a context associated with it. + */ + public boolean contains(long requestId) { + return handlers.containsKey(requestId); + } + + /** + * Removes and returns the {@link ResponseContext} for the given request ID or returns + * null if no context is associated with this request ID. + */ + public ResponseContext remove(long requestId) { + return handlers.remove(requestId); + } + + /** + * Adds a new response context and associates it with a new request ID.
+ * @return the new request ID + * @see Connection#sendRequest(long, String, TransportRequest, TransportRequestOptions) + */ + public long add(ResponseContext holder) { + long requestId = newRequestId(); + ResponseContext existing = handlers.put(requestId, holder); + assert existing == null : "request ID already in use: " + requestId; + return requestId; + } + + /** + * Returns a new request ID to use when sending a message via {@link Connection#sendRequest(long, String, + * TransportRequest, TransportRequestOptions)} + */ + long newRequestId() { + return requestIdGenerator.incrementAndGet(); + } + + /** + * Removes and returns all {@link ResponseContext} instances that match the predicate + */ + public List prune(Predicate predicate) { + final List holders = new ArrayList<>(); + for (Map.Entry entry : handlers.entrySet()) { + ResponseContext holder = entry.getValue(); + if (predicate.test(holder)) { + ResponseContext remove = handlers.remove(entry.getKey()); + if (remove != null) { + holders.add(holder); + } + } + } + return holders; + } + + /** + * called by the {@link Transport} implementation when a response or an exception has been received for a previously + * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not + * found. + */ + public TransportResponseHandler onResponseReceived(final long requestId, TransportConnectionListener listener) { + ResponseContext context = handlers.remove(requestId); + listener.onResponseReceived(requestId, context); + if (context == null) { + return null; + } else { + return context.handler(); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java b/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java index de767986b9f09..0ee2ed5828d44 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportConnectionListener.java @@ -21,26 +21,75 @@ import org.elasticsearch.cluster.node.DiscoveryNode; +/** + * A listener interface that allows reacting to transport events. All methods may be + * executed on network threads. Consumers must fork in the case of long running or blocking + * operations. + */ public interface TransportConnectionListener { /** - * Called once a node connection is opened and registered. + * Called once a request is received + * @param requestId the internal request ID + * @param action the request action + * + */ - default void onNodeConnected(DiscoveryNode node) {} + default void onRequestReceived(long requestId, String action) {} /** - * Called once a node connection is closed and unregistered. + * Called for every action response sent after the response has been passed to the underlying network implementation. + * @param requestId the request ID (unique per client) + * @param action the request action + * @param response the response sent + * @param finalOptions the response options */ - default void onNodeDisconnected(DiscoveryNode node) {} + default void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {} + + /** + * Called for every failed action response after the response has been passed to the underlying network implementation.
+ * @param requestId the request ID (unique per client) + * @param action the request action + * @param error the error sent back to the caller + */ + default void onResponseSent(long requestId, String action, Exception error) {} /** - * Called once a node connection is closed. The connection might not have been registered in the - * transport as a shared connection to a specific node + * Called for every request sent to a server after the request has been passed to the underlying network implementation + * @param node the node the request was sent to + * @param requestId the internal request id + * @param action the action name + * @param request the actual request + * @param finalOptions the request options */ - default void onConnectionClosed(Transport.Connection connection) {} + default void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions finalOptions) {} /** - * Called once a node connection is opened. + * Called once a connection was opened + * @param connection the connection */ default void onConnectionOpened(Transport.Connection connection) {} + + /** + * Called once a connection was closed. + * @param connection the closed connection + */ + default void onConnectionClosed(Transport.Connection connection) {} + + /** + * Called for every response received + * @param requestId the request id for this response + * @param context the response context or null if the context was already processed, e.g. due to a timeout. + */ + default void onResponseReceived(long requestId, Transport.ResponseContext context) {} + + /** + * Called once a node connection is opened and registered. + */ + default void onNodeConnected(DiscoveryNode node) {} + + /** + * Called once a node connection is closed and unregistered.
+ */ + default void onNodeDisconnected(DiscoveryNode node) {} } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 8d3929cd6615a..032258101f730 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -45,8 +44,6 @@ import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -59,24 +56,23 @@ import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; -import java.util.stream.Stream; import static java.util.Collections.emptyList; import static org.elasticsearch.common.settings.Setting.listSetting; -public class TransportService extends AbstractLifecycleComponent { +public class TransportService extends AbstractLifecycleComponent implements TransportConnectionListener { public static final String DIRECT_RESPONSE_PROFILE = ".direct"; public static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; @@ -89,14 +85,7 @@ public class TransportService extends AbstractLifecycleComponent { private final TransportInterceptor.AsyncSender asyncSender; private final Function localNodeFactory; private final boolean connectToRemoteCluster; - - volatile Map requestHandlers = Collections.emptyMap(); - final Object requestHandlerMutex = new Object(); - - final ConcurrentMapLong clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); - - final CopyOnWriteArrayList connectionListeners = new CopyOnWriteArrayList<>(); - + private final Transport.ResponseHandlers responseHandlers; private final TransportInterceptor interceptor; // An LRU (don't really care about concurrency here) that holds the latest timed out requests so if they @@ -138,12 +127,12 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { + throws TransportException { sendLocalRequest(requestId, action, request, options); } @Override - public void close() throws IOException { + public void close() { } }; @@ -172,6 +161,7 @@ 
public TransportService(Settings settings, Transport transport, ThreadPool threa this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.connectToRemoteCluster = RemoteClusterService.ENABLE_REMOTE_CLUSTERS.get(settings); remoteClusterService = new RemoteClusterService(settings, this); + responseHandlers = transport.getResponseHandlers(); if (clusterSettings != null) { clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); @@ -179,6 +169,13 @@ public TransportService(Settings settings, Transport transport, ThreadPool threa remoteClusterService.listenForUpdates(clusterSettings); } } + registerRequestHandler( + HANDSHAKE_ACTION_NAME, + () -> HandshakeRequest.INSTANCE, + ThreadPool.Names.SAME, + false, false, + (request, channel, task) -> channel.sendResponse( + new HandshakeResponse(localNode, clusterName, localNode.getVersion()))); } public RemoteClusterService getRemoteClusterService() { @@ -202,7 +199,7 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool * * @return the executor service */ - protected ExecutorService getExecutorService() { + private ExecutorService getExecutorService() { return threadPool.generic(); } @@ -216,9 +213,8 @@ void setTracerLogExclude(List tracerLogExclude) { @Override protected void doStart() { - transport.setTransportService(this); + transport.addConnectionListener(this); transport.start(); - if (transport.boundAddress() != null && logger.isInfoEnabled()) { logger.info("{}", transport.boundAddress()); for (Map.Entry entry : transport.profileBoundAddresses().entrySet()) { @@ -226,13 +222,7 @@ protected void doStart() { } } localNode = localNodeFactory.apply(transport.boundAddress()); - registerRequestHandler( - HANDSHAKE_ACTION_NAME, - () -> HandshakeRequest.INSTANCE, - ThreadPool.Names.SAME, - false, false, - (request, channel, task) -> channel.sendResponse( - new HandshakeResponse(localNode, clusterName, localNode.getVersion()))); + if (connectToRemoteCluster) { // here we start to connect to the remote clusters remoteClusterService.initializeRemoteClusters(); @@ -246,36 +236,33 @@ protected void doStop() { } finally { // in case the transport is not connected to our local node (thus cleaned on node disconnect) // make sure to clean any leftover on going handles - for (Map.Entry entry : clientHandlers.entrySet()) { - final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); - if (holderToNotify != null) { - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - getExecutorService().execute(new AbstractRunnable() { - @Override - public void onRejection(Exception e) { - // if we get rejected during node shutdown we don't wanna bubble it up - logger.debug( - () -> new ParameterizedMessage( - "failed to notify response handler on rejection, action: {}", - holderToNotify.action()), - e); - } - @Override - public void onFailure(Exception e) { - logger.warn( - () -> new ParameterizedMessage( - "failed to notify response handler on exception, action: {}", - holderToNotify.action()), - e); - } - @Override - public void doRun() { - TransportException ex = new TransportException("transport stopped, action: " + holderToNotify.action()); - holderToNotify.handler().handleException(ex); - } - }); - } + for (final Transport.ResponseContext holderToNotify : responseHandlers.prune(h -> 
true)) { + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + getExecutorService().execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug( + () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action()), + e); + } + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action()), + e); + } + @Override + public void doRun() { + TransportException ex = new TransportException("transport stopped, action: " + holderToNotify.action()); + holderToNotify.handler().handleException(ex); + } + }); } } } @@ -479,11 +466,11 @@ public void disconnectFromNode(DiscoveryNode node) { } public void addConnectionListener(TransportConnectionListener listener) { - connectionListeners.add(listener); + transport.addConnectionListener(listener); } public void removeConnectionListener(TransportConnectionListener listener) { - connectionListeners.remove(listener); + transport.removeConnectionListener(listener); } public TransportFuture submitRequest(DiscoveryNode node, String action, TransportRequest request, @@ -594,18 +581,19 @@ private void sendRequestInternal(final Transport.C throw new IllegalStateException("can't send request to a null connection"); } DiscoveryNode node = connection.getNode(); - final long requestId = transport.newRequestId(); + + Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); + ContextRestoreResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); + // TODO we can probably fold this entire request ID dance into connection.sendRequest but it will be a bigger refactoring + final long requestId = responseHandlers.add(new Transport.ResponseContext<>(responseHandler, connection, action)); final TimeoutHandler timeoutHandler; + if (options.timeout() != null) { + timeoutHandler = new TimeoutHandler(requestId, connection.getNode(), action); + responseHandler.setTimeoutHandler(timeoutHandler); + } else { + timeoutHandler = null; + } try { - - if (options.timeout() == null) { - timeoutHandler = null; - } else { - timeoutHandler = new TimeoutHandler(requestId); - } - Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); - TransportResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); - clientHandlers.put(requestId, new RequestHolder<>(responseHandler, connection, action, timeoutHandler)); if (lifecycle.stoppedOrClosed()) { // if we are not started the exception handling will remove the RequestHolder again and calls the handler to notify // the caller. It will only notify if the toStop code hasn't done the work yet. @@ -619,10 +607,12 @@ private void sendRequestInternal(final Transport.C } catch (final Exception e) { // usually happen either because we failed to connect to the node // or because we failed serializing the message - final RequestHolder holderToNotify = clientHandlers.remove(requestId); + final Transport.ResponseContext contextToNotify = responseHandlers.remove(requestId); // If holderToNotify == null then handler has already been taken care of.
- if (holderToNotify != null) { - holderToNotify.cancelTimeout(); + if (contextToNotify != null) { + if (timeoutHandler != null) { + timeoutHandler.cancel(); + } // callback that an exception happened, but on a different thread since we don't // want handlers to worry about stack overflows final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e); @@ -633,7 +623,7 @@ public void onRejection(Exception e) { logger.debug( () -> new ParameterizedMessage( "failed to notify response handler on rejection, action: {}", - holderToNotify.action()), + contextToNotify.action()), e); } @Override @@ -641,12 +631,12 @@ public void onFailure(Exception e) { logger.warn( () -> new ParameterizedMessage( "failed to notify response handler on exception, action: {}", - holderToNotify.action()), + contextToNotify.action()), e); } @Override protected void doRun() throws Exception { - holderToNotify.handler().handleException(sendRequestException); + contextToNotify.handler().handleException(sendRequestException); } }); } else { @@ -722,6 +712,44 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi return transport.addressesFromString(address, perAddressLimit); } + /** + * A set of all valid action prefixes. + */ + public static final Set VALID_ACTION_PREFIXES = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( + "indices:admin", + "indices:monitor", + "indices:data/write", + "indices:data/read", + "indices:internal", + "cluster:admin", + "cluster:monitor", + "cluster:internal", + "internal:" + ))); + + private void validateActionName(String actionName) { + // TODO we should make this a hard validation and throw an exception but we need a good way to add a backwards compatibility layer + // for it. Maybe start with a deprecation layer + if (isValidActionName(actionName) == false) { + logger.warn("invalid action name [" + actionName + "] must start with one of: " + + TransportService.VALID_ACTION_PREFIXES ); + } + } + + /** + * Returns true iff the action name starts with a valid prefix.
+ * + * @see #VALID_ACTION_PREFIXES + */ + public static boolean isValidActionName(String actionName) { + for (String prefix : VALID_ACTION_PREFIXES) { + if (actionName.startsWith(prefix)) { + return true; + } + } + return false; + } + /** * Registers a new request handler * @@ -732,10 +760,11 @@ public TransportAddress[] addressesFromString(String address, int perAddressLimi */ public void registerRequestHandler(String action, Supplier requestFactory, String executor, TransportRequestHandler handler) { + validateActionName(action); handler = interceptor.interceptHandler(action, executor, false, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, Streamable.newWriteableReader(requestFactory), taskManager, handler, executor, false, true); - registerRequestHandler(reg); + transport.registerRequestHandler(reg); } /** @@ -749,10 +778,11 @@ public void registerRequestHandler(String act public void registerRequestHandler(String action, String executor, Writeable.Reader requestReader, TransportRequestHandler handler) { + validateActionName(action); handler = interceptor.interceptHandler(action, executor, false, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, requestReader, taskManager, handler, executor, false, true); - registerRequestHandler(reg); + transport.registerRequestHandler(reg); } /** @@ -769,10 +799,11 @@ public void registerRequestHandler(String act String executor, boolean forceExecution, boolean canTripCircuitBreaker, TransportRequestHandler handler) { + validateActionName(action); handler = interceptor.interceptHandler(action, executor, forceExecution, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, Streamable.newWriteableReader(request), taskManager, handler, executor, forceExecution, canTripCircuitBreaker); - registerRequestHandler(reg); + transport.registerRequestHandler(reg); } /** @@ -790,24 +821,16 @@ public void registerRequestHandler(String act boolean canTripCircuitBreaker, Writeable.Reader requestReader, TransportRequestHandler handler) { + validateActionName(action); handler = interceptor.interceptHandler(action, executor, forceExecution, handler); RequestHandlerRegistry reg = new RequestHandlerRegistry<>( action, requestReader, taskManager, handler, executor, forceExecution, canTripCircuitBreaker); - registerRequestHandler(reg); - } - - private void registerRequestHandler(RequestHandlerRegistry reg) { - synchronized (requestHandlerMutex) { - if (requestHandlers.containsKey(reg.getAction())) { - throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); - } - requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); - } + transport.registerRequestHandler(reg); } /** called by the {@link Transport} implementation once a request has been sent */ - void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, - TransportRequestOptions options) { + public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request, + TransportRequestOptions options) { if (traceEnabled() && shouldTraceAction(action)) { traceRequestSent(node, requestId, action, options); } @@ -818,14 +841,14 @@ protected boolean traceEnabled() { } /** called by the {@link Transport} implementation once a response was sent to calling node */ - void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions options) { + 
public void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions options) { if (traceEnabled() && shouldTraceAction(action)) { traceResponseSent(requestId, action); } } /** called by the {@link Transport} implementation after an exception was sent as a response to an incoming request */ - void onResponseSent(long requestId, String action, Exception e) { + public void onResponseSent(long requestId, String action, Exception e) { if (traceEnabled() && shouldTraceAction(action)) { traceResponseSent(requestId, action, e); } @@ -839,7 +862,7 @@ protected void traceResponseSent(long requestId, String action, Exception e) { * called by the {@link Transport} implementation when an incoming request arrives but before * any parsing of it has happened (with the exception of the requestId and action) */ - void onRequestReceived(long requestId, String action) { + public void onRequestReceived(long requestId, String action) { try { blockIncomingRequestsLatch.await(); } catch (InterruptedException e) { @@ -851,33 +874,24 @@ void onRequestReceived(long requestId, String action) { } public RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); + return transport.getRequestHandler(action); } - /** - * called by the {@link Transport} implementation when a response or an exception has been received for a previously - * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not - * found. - */ - public TransportResponseHandler onResponseReceived(final long requestId) { - RequestHolder holder = clientHandlers.remove(requestId); + @Override + public void onResponseReceived(long requestId, Transport.ResponseContext holder) { if (holder == null) { checkForTimeout(requestId); - return null; - } - holder.cancelTimeout(); - if (traceEnabled() && shouldTraceAction(holder.action())) { + } else if (traceEnabled() && shouldTraceAction(holder.action())) { traceReceivedResponse(requestId, holder.connection().getNode(), holder.action()); } - return holder.handler(); } private void checkForTimeout(long requestId) { // lets see if its in the timeout holder, but sync on mutex to make sure any ongoing timeout handling has finished final DiscoveryNode sourceNode; final String action; - assert clientHandlers.get(requestId) == null; + assert responseHandlers.contains(requestId) == false; TimeoutInfoHolder timeoutInfoHolder = timeoutInfoHandlers.remove(requestId); if (timeoutInfoHolder != null) { long time = System.currentTimeMillis(); @@ -903,48 +917,18 @@ private void checkForTimeout(long requestId) { } } - void onNodeConnected(final DiscoveryNode node) { - // capture listeners before spawning the background callback so the following pattern won't trigger a call - // connectToNode(); connection is completed successfully - // addConnectionListener(); this listener shouldn't be called - final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); - getExecutorService().execute(() -> listenersToNotify.forEach(listener -> listener.onNodeConnected(node))); - } - - void onConnectionOpened(Transport.Connection connection) { - // capture listeners before spawning the background callback so the following pattern won't trigger a call - // connectToNode(); connection is completed successfully - // addConnectionListener(); this listener shouldn't be called - final Stream listenersToNotify = TransportService.this.connectionListeners.stream(); - 
getExecutorService().execute(() -> listenersToNotify.forEach(listener -> listener.onConnectionOpened(connection))); - } - - public void onNodeDisconnected(final DiscoveryNode node) { + @Override + public void onConnectionClosed(Transport.Connection connection) { try { - getExecutorService().execute( () -> { - for (final TransportConnectionListener connectionListener : connectionListeners) { - connectionListener.onNodeDisconnected(node); + List pruned = responseHandlers.prune(h -> h.connection().getCacheKey().equals(connection + .getCacheKey())); + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + getExecutorService().execute(() -> { + for (Transport.ResponseContext holderToNotify : pruned) { + holderToNotify.handler().handleException(new NodeDisconnectedException(connection.getNode(), holderToNotify.action())); } }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Rejected execution on NodeDisconnected", ex); - } - } - - void onConnectionClosed(Transport.Connection connection) { - try { - for (Map.Entry entry : clientHandlers.entrySet()) { - RequestHolder holder = entry.getValue(); - if (holder.connection().getCacheKey().equals(connection.getCacheKey())) { - final RequestHolder holderToNotify = clientHandlers.remove(entry.getKey()); - if (holderToNotify != null) { - // callback that an exception happened, but on a different thread since we don't - // want handlers to worry about stack overflows - getExecutorService().execute(() -> holderToNotify.handler().handleException(new NodeDisconnectedException( - connection.getNode(), holderToNotify.action()))); - } - } - } } catch (EsRejectedExecutionException ex) { logger.debug("Rejected execution on onConnectionClosed", ex); } @@ -970,32 +954,31 @@ protected void traceRequestSent(DiscoveryNode node, long requestId, String actio tracerLog.trace("[{}][{}] sent to [{}] (timeout: [{}])", requestId, action, node, options.timeout()); } - class TimeoutHandler implements Runnable { + final class TimeoutHandler implements Runnable { private final long requestId; - private final long sentTime = System.currentTimeMillis(); - + private final String action; + private final DiscoveryNode node; volatile ScheduledFuture future; - TimeoutHandler(long requestId) { + TimeoutHandler(long requestId, DiscoveryNode node, String action) { this.requestId = requestId; + this.node = node; + this.action = action; } @Override public void run() { - // we get first to make sure we only add the TimeoutInfoHandler if needed. 
- final RequestHolder holder = clientHandlers.get(requestId); - if (holder != null) { - // add it to the timeout information holder, in case we are going to get a response later + if (responseHandlers.contains(requestId)) { long timeoutTime = System.currentTimeMillis(); - timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(holder.connection().getNode(), holder.action(), sentTime, - timeoutTime)); + timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(node, action, sentTime, timeoutTime)); // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id - final RequestHolder removedHolder = clientHandlers.remove(requestId); - if (removedHolder != null) { - assert removedHolder == holder : "two different holder instances for request [" + requestId + "]"; - removedHolder.handler().handleException( + final Transport.ResponseContext holder = responseHandlers.remove(requestId); + if (holder != null) { + assert holder.action().equals(action); + assert holder.connection().getNode().equals(node); + holder.handler().handleException( new ReceiveTimeoutTransportException(holder.connection().getNode(), holder.action(), "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]")); } else { @@ -1006,11 +989,11 @@ public void run() { } /** - * cancels timeout handling. this is a best effort only to avoid running it. remove the requestId from {@link #clientHandlers} + * cancels timeout handling. this is a best effort only to avoid running it. remove the requestId from {@link #responseHandlers} * to make sure this doesn't run. */ public void cancel() { - assert clientHandlers.get(requestId) == null : + assert responseHandlers.contains(requestId) == false : "cancel must be called after the requestId [" + requestId + "] has been removed from clientHandlers"; FutureUtils.cancel(future); } @@ -1047,42 +1030,6 @@ public long timeoutTime() { } } - static class RequestHolder { - - private final TransportResponseHandler handler; - - private final Transport.Connection connection; - - private final String action; - - private final TimeoutHandler timeoutHandler; - - RequestHolder(TransportResponseHandler handler, Transport.Connection connection, String action, TimeoutHandler timeoutHandler) { - this.handler = handler; - this.connection = connection; - this.action = action; - this.timeoutHandler = timeoutHandler; - } - - public TransportResponseHandler handler() { - return handler; - } - - public Transport.Connection connection() { - return this.connection; - } - - public String action() { - return this.action; - } - - public void cancelTimeout() { - if (timeoutHandler != null) { - timeoutHandler.cancel(); - } - } - } - /** * This handler wrapper ensures that the response thread executes with the correct thread context. Before any of the handle methods * are invoked we restore the context. 
@@ -1091,6 +1038,7 @@ public static final class ContextRestoreResponseHandler delegate; private final Supplier contextSupplier; + private volatile TimeoutHandler handler; public ContextRestoreResponseHandler(Supplier contextSupplier, TransportResponseHandler delegate) { this.delegate = delegate; @@ -1104,6 +1052,9 @@ public T read(StreamInput in) throws IOException { @Override public void handleResponse(T response) { + if (handler != null) { + handler.cancel(); + } try (ThreadContext.StoredContext ignore = contextSupplier.get()) { delegate.handleResponse(response); } @@ -1111,6 +1062,9 @@ public void handleResponse(T response) { @Override public void handleException(TransportException exp) { + if (handler != null) { + handler.cancel(); + } try (ThreadContext.StoredContext ignore = contextSupplier.get()) { delegate.handleException(exp); } @@ -1126,6 +1080,10 @@ public String toString() { return getClass().getName() + "/" + delegate.toString(); } + void setTimeoutHandler(TimeoutHandler handler) { + this.handler = handler; + } + } static class DirectResponseChannel implements TransportChannel { @@ -1159,7 +1117,7 @@ public void sendResponse(TransportResponse response) throws IOException { @Override public void sendResponse(final TransportResponse response, TransportResponseOptions options) throws IOException { service.onResponseSent(requestId, action, response, options); - final TransportResponseHandler handler = service.onResponseReceived(requestId); + final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service); // ignore if its null, the service logs it if (handler != null) { final String executor = handler.executor(); @@ -1183,7 +1141,7 @@ protected void processResponse(TransportResponseHandler handler, TransportRespon @Override public void sendResponse(Exception exception) throws IOException { service.onResponseSent(requestId, action, exception); - final TransportResponseHandler handler = service.onResponseReceived(requestId); + final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service); // ignore if its null, the service logs it if (handler != null) { final RemoteTransportException rtx = wrapInRemote(exception); @@ -1224,6 +1182,7 @@ public Version getVersion() { } } + /** * Returns the internal thread pool */ diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index d9796847efa77..e1ba83374829f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -214,7 +214,7 @@ private Task startCancellableTestNodesAction(boolean waitForActionToStart, Colle for (int i = 0; i < testNodes.length; i++) { boolean shouldBlock = blockOnNodes.contains(testNodes[i]); logger.info("The action in the node [{}] should block: [{}]", testNodes[i].getNodeId(), shouldBlock); - actions[i] = new CancellableTestNodesAction(CLUSTER_SETTINGS, "testAction", threadPool, testNodes[i] + actions[i] = new CancellableTestNodesAction(CLUSTER_SETTINGS, "internal:testAction", threadPool, testNodes[i] .clusterService, testNodes[i].transportService, shouldBlock, actionLatch); } Task task = actions[0].execute(request, listener); diff --git
a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index edc79db79422d..4b017bbf57d68 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -276,7 +276,7 @@ private Task startBlockingTestNodesAction(CountDownLatch checkLatch, NodesReques TestNodesAction[] actions = new TestNodesAction[nodesCount]; for (int i = 0; i < testNodes.length; i++) { final int node = i; - actions[i] = new TestNodesAction(CLUSTER_SETTINGS, "testAction", threadPool, testNodes[i].clusterService, + actions[i] = new TestNodesAction(CLUSTER_SETTINGS, "internal:testAction", threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected NodeResponse nodeOperation(NodeRequest request) { @@ -341,7 +341,7 @@ public void onFailure(Exception e) { int testNodeNum = randomIntBetween(0, testNodes.length - 1); TestNode testNode = testNodes[testNodeNum]; ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.setActions("testAction*"); // pick all test actions + listTasksRequest.setActions("internal:testAction*"); // pick all test actions logger.info("Listing currently running tasks using node [{}]", testNodeNum); ListTasksResponse response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); logger.info("Checking currently running tasks"); @@ -361,7 +361,7 @@ public void onFailure(Exception e) { // Check task counts using transport with filtering testNode = testNodes[randomIntBetween(0, testNodes.length - 1)]; listTasksRequest = new ListTasksRequest(); - listTasksRequest.setActions("testAction[n]"); // only pick node actions + listTasksRequest.setActions("internal:testAction[n]"); // only pick node actions response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -384,7 +384,7 @@ public void onFailure(Exception e) { } // Make sure that the main task on coordinating node is the task that was returned to us by execute() - listTasksRequest.setActions("testAction"); // only pick the main task + listTasksRequest.setActions("internal:testAction"); // only pick the main task response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); assertEquals(1, response.getTasks().size()); assertEquals(mainTask.getId(), response.getTasks().get(0).getId()); @@ -412,7 +412,7 @@ public void testFindChildTasks() throws Exception { // Get the parent task ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.setActions("testAction"); + listTasksRequest.setActions("internal:testAction"); ListTasksResponse response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); assertEquals(1, response.getTasks().size()); String parentNode = response.getTasks().get(0).getTaskId().getNodeId(); @@ -424,7 +424,7 @@ public void testFindChildTasks() throws Exception { response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); assertEquals(testNodes.length, response.getTasks().size()); for (TaskInfo task : response.getTasks()) { - assertEquals("testAction[n]", 
task.getAction()); + assertEquals("internal:testAction[n]", task.getAction()); assertEquals(parentNode, task.getParentTaskId().getNodeId()); assertEquals(parentTaskId, task.getParentTaskId().getId()); } @@ -446,7 +446,7 @@ public void testTasksDescriptions() throws Exception { // Check task counts using transport with filtering TestNode testNode = testNodes[randomIntBetween(0, testNodes.length - 1)]; ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.setActions("testAction[n]"); // only pick node actions + listTasksRequest.setActions("internal:testAction[n]"); // only pick node actions ListTasksResponse response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { @@ -488,7 +488,7 @@ public void onFailure(Exception e) { responseLatch.countDown(); } }); - String actionName = "testAction"; // only pick the main action + String actionName = "internal:testAction"; // only pick the main action // Try to cancel main task using action name CancelTasksRequest request = new CancelTasksRequest(); @@ -538,10 +538,10 @@ public void testFailedTasksCount() throws ExecutionException, InterruptedExcepti setupTestNodes(settings); connectNodes(testNodes); TestNodesAction[] actions = new TestNodesAction[nodesCount]; - RecordingTaskManagerListener[] listeners = setupListeners(testNodes, "testAction*"); + RecordingTaskManagerListener[] listeners = setupListeners(testNodes, "internal:testAction*"); for (int i = 0; i < testNodes.length; i++) { final int node = i; - actions[i] = new TestNodesAction(CLUSTER_SETTINGS, "testAction", threadPool, testNodes[i].clusterService, + actions[i] = new TestNodesAction(CLUSTER_SETTINGS, "internal:testAction", threadPool, testNodes[i].clusterService, testNodes[i].transportService) { @Override protected NodeResponse nodeOperation(NodeRequest request) { @@ -581,7 +581,7 @@ public void testTaskLevelActionFailures() throws ExecutionException, Interrupted for (int i = 0; i < testNodes.length; i++) { final int node = i; // Simulate task action that fails on one of the tasks on one of the nodes - tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction", testNodes[i].clusterService, + tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "internal:testTasksAction", testNodes[i].clusterService, testNodes[i].transportService) { @Override protected void taskOperation(TestTasksRequest request, Task task, ActionListener listener) { @@ -619,7 +619,7 @@ protected void taskOperation(TestTasksRequest request, Task task, ActionListener // Run task action on node tasks that are currently running // should be successful on all nodes except one TestTasksRequest testTasksRequest = new TestTasksRequest(); - testTasksRequest.setActions("testAction[n]"); // pick all test actions + testTasksRequest.setActions("internal:testAction[n]"); // pick all test actions TestTasksResponse response = ActionTestUtils.executeBlocking(tasksActions[0], testTasksRequest); assertThat(response.getTaskFailures(), hasSize(1)); // one task failed assertThat(response.getTaskFailures().get(0).getReason(), containsString("Task level failure")); @@ -660,7 +660,7 @@ public void testTaskNodeFiltering() throws ExecutionException, InterruptedExcept final int node = i; // Simulate a task action that works on all nodes except nodes listed in filterNodes. // We are testing that it works. 
- tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction", + tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "internal:testTasksAction", testNodes[i].clusterService, testNodes[i].transportService) { @Override @@ -689,7 +689,7 @@ protected void taskOperation(TestTasksRequest request, Task task, ActionListener // Run task action on node tasks that are currently running // should be successful on all nodes except nodes that we filtered out TestTasksRequest testTasksRequest = new TestTasksRequest(); - testTasksRequest.setActions("testAction[n]"); // pick all test actions + testTasksRequest.setActions("internal:testAction[n]"); // pick all test actions TestTasksResponse response = ActionTestUtils.executeBlocking(tasksActions[randomIntBetween(0, nodesCount - 1)], testTasksRequest); // Get successful responses from all nodes except nodes that we filtered out diff --git a/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java b/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java index 2c2694116b216..6e13d4a5d7705 100644 --- a/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/main/MainActionTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import java.util.Collections; @@ -66,8 +67,8 @@ public void testMainActionClusterAvailable() { ClusterState state = ClusterState.builder(clusterName).blocks(blocks).build(); when(clusterService.state()).thenReturn(state); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService + .NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportMainAction action = new TransportMainAction(settings, transportService, mock(ActionFilters.class), clusterService); AtomicReference responseRef = new AtomicReference<>(); action.doExecute(mock(Task.class), new MainRequest(), new ActionListener() { diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index fc3fb34a6cb19..f327086cd00e5 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -128,10 +129,9 @@ public void onFailure(Exception e) { private TransportMultiSearchAction createTransportMultiSearchAction(boolean controlledClock, AtomicLong expected) { Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); - TaskManager taskManager = mock(TaskManager.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - boundAddress -> 
DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet()) { + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), + UUIDs.randomBase64UUID()), null, Collections.emptySet()) { @Override public TaskManager getTaskManager() { return taskManager; @@ -140,7 +140,6 @@ public TaskManager getTaskManager() { ActionFilters actionFilters = new ActionFilters(new HashSet<>()); ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build()); - IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY); final int availableProcessors = Runtime.getRuntime().availableProcessors(); AtomicInteger counter = new AtomicInteger(); diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index a43584a4130e4..dade5eadb1832 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -38,6 +37,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.Before; @@ -82,8 +82,7 @@ public void testBatchExecute() throws Exception { ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); ThreadPool threadPool = new ThreadPool(settings); - TaskManager taskManager = mock(TaskManager.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool, + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, Collections.emptySet()) { @@ -94,7 +93,6 @@ public TaskManager getTaskManager() { }; ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build()); - IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY); // Keep track of the number of concurrent searches started by multi search api, // and if there are more searches than is allowed create an error and remember that. 
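Review note, not part of the patch: the hunks above change the send path so that a caller reserves a request ID by registering its response context with the new Transport.ResponseHandlers registry, while the receive path resolves the handler through the same registry, which also fires the TransportConnectionListener callback exactly once. Below is a minimal sketch of that contract; the class ResponseHandlersUsageSketch and both helper methods are invented for illustration, and the sketch assumes it lives in the org.elasticsearch.transport package because ResponseContext's constructor is package-private.

    package org.elasticsearch.transport;

    import java.io.IOException;

    // Illustrative sketch only; mirrors how sendRequestInternal and
    // TcpTransport#messageReceived use the registry after this change.
    final class ResponseHandlersUsageSketch {

        // Sending side: add(...) generates a fresh request ID and associates the
        // response context with it before the request goes over the wire.
        static long send(Transport transport, Transport.Connection connection, String action,
                         TransportRequest request, TransportResponseHandler handler) throws IOException {
            Transport.ResponseHandlers responseHandlers = transport.getResponseHandlers();
            long requestId = responseHandlers.add(new Transport.ResponseContext(handler, connection, action));
            connection.sendRequest(requestId, action, request, TransportRequestOptions.EMPTY);
            return requestId;
        }

        // Receiving side: onResponseReceived(...) removes the context, notifies the
        // listener, and returns the handler, or null if the context was already
        // pruned, e.g. by a timeout or a disconnect.
        static TransportResponseHandler receive(Transport transport, long requestId,
                                                TransportConnectionListener listener) {
            return transport.getResponseHandlers().onResponseReceived(requestId, listener);
        }
    }
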
diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index f2b18a8c8f561..b27bc9ad79432 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -174,7 +174,7 @@ public void testLocalOperationWithoutBlocks() throws ExecutionException, Interru setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { if (masterOperationFailure) { @@ -211,7 +211,7 @@ public void testLocalOperationWithBlocks() throws ExecutionException, Interrupte .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); setState(clusterService, stateWithBlock); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { @Override protected ClusterBlockException checkBlock(Request request, ClusterState state) { Set blocks = state.blocks().global(); @@ -253,7 +253,7 @@ public void testCheckBlockThrowsException() throws InterruptedException { .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); setState(clusterService, stateWithBlock); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { @Override protected ClusterBlockException checkBlock(Request request, ClusterState state) { Set blocks = state.blocks().global(); @@ -281,7 +281,7 @@ public void testForceLocalOperation() throws ExecutionException, InterruptedExce setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(localNode, remoteNode, null), allNodes)); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { @Override protected boolean localExecute(Request request) { return true; @@ -296,7 +296,7 @@ public void testMasterNotAvailable() throws ExecutionException, InterruptedExcep Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0)); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); assertTrue(listener.isDone()); assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class); } @@ -305,7 +305,7 @@ public void testMasterBecomesAvailable() throws ExecutionException, InterruptedE Request request = new Request(); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new 
Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); assertFalse(listener.isDone()); setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); assertTrue(listener.isDone()); @@ -317,13 +317,13 @@ public void testDelegateToMaster() throws ExecutionException, InterruptedExcepti setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); assertThat(transport.capturedRequests().length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; assertTrue(capturedRequest.node.isMasterNode()); assertThat(capturedRequest.request, equalTo(request)); - assertThat(capturedRequest.action, equalTo("testAction")); + assertThat(capturedRequest.action, equalTo("internal:testAction")); Response response = new Response(); transport.handleResponse(capturedRequest.requestId, response); @@ -340,14 +340,14 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted .version(randomIntBetween(0, 10))); // use a random base version so it can go down when simulating a restart. PlainActionFuture listener = new PlainActionFuture<>(); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener); + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool).execute(request, listener); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests.length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = capturedRequests[0]; assertTrue(capturedRequest.node.isMasterNode()); assertThat(capturedRequest.request, equalTo(request)); - assertThat(capturedRequest.action, equalTo("testAction")); + assertThat(capturedRequest.action, equalTo("internal:testAction")); if (rejoinSameMaster) { transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(masterNode, "Fake error")); @@ -380,7 +380,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted capturedRequest = capturedRequests[0]; assertTrue(capturedRequest.node.isMasterNode()); assertThat(capturedRequest.request, equalTo(request)); - assertThat(capturedRequest.action, equalTo("testAction")); + assertThat(capturedRequest.action, equalTo("internal:testAction")); } else if (failsWithConnectTransportException) { transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(masterNode, "Fake error")); assertFalse(listener.isDone()); @@ -413,7 +413,7 @@ public void testMasterFailoverAfterStepDown() throws ExecutionException, Interru setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); - new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + new Action(Settings.EMPTY, "internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { // The 
other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery @@ -429,7 +429,7 @@ protected void masterOperation(Request request, ClusterState state, ActionListen CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0]; assertTrue(capturedRequest.node.isMasterNode()); assertThat(capturedRequest.request, equalTo(request)); - assertThat(capturedRequest.action, equalTo("testAction")); + assertThat(capturedRequest.action, equalTo("internal:testAction")); transport.handleResponse(capturedRequest.requestId, response); assertTrue(listener.isDone()); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 012cc71437a80..4e3af6cc41277 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -210,7 +210,7 @@ private class TestBroadcastReplicationAction extends TransportBroadcastReplicati TestBroadcastReplicationAction(Settings settings, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) { - super("test-broadcast-replication-action", DummyBroadcastRequest::new, settings, clusterService, transportService, + super("internal:test-broadcast-replication-action", DummyBroadcastRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedBroadcastShardAction); } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index a34c755e05272..08301e99d6a69 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -168,7 +168,7 @@ public void setUp() throws Exception { transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); - action = new TestAction(Settings.EMPTY, "testAction", transportService, clusterService, shardStateAction, threadPool); + action = new TestAction(Settings.EMPTY, "internal:testAction", transportService, clusterService, shardStateAction, threadPool); } @After @@ -196,7 +196,7 @@ public void testBlocks() throws ExecutionException, InterruptedException { Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); ReplicationTask task = maybeTask(); - TestAction action = new TestAction(Settings.EMPTY, "testActionWithBlocks", + TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithBlocks", transportService, clusterService, shardStateAction, threadPool) { @Override protected ClusterBlockLevel globalBlockLevel() { @@ -236,7 +236,8 @@ protected ClusterBlockLevel globalBlockLevel() { ClusterBlockException.class); assertIndexShardUninitialized(); - action = new TestAction(Settings.EMPTY, "testActionWithNoBlocks", transportService, clusterService, shardStateAction, threadPool) { + action = new 
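
All of the "testAction" to "internal:testAction" renames in these test files serve one invariant: a transport action name must start with a recognized prefix, which the test framework now enforces through the AssertActionNamePlugin added to ESIntegTestCase further down in this patch. A small sketch of the rule, reusing the two TransportService helpers the patch itself references; the wrapper class is illustrative only:

    import org.elasticsearch.transport.TransportService;

    class ActionNameCheck {
        // Mirrors the check in AssertActionNamePlugin below; this class is not part of the patch.
        static void requireValid(String action) {
            if (TransportService.isValidActionName(action) == false) {
                throw new IllegalArgumentException("invalid action name [" + action + "] must start with one of: "
                    + TransportService.VALID_ACTION_PREFIXES);
            }
        }
    }

    // requireValid("internal:testAction") passes; requireValid("testAction") throws.
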
TestAction(Settings.EMPTY, "internal:testActionWithNoBlocks", transportService, clusterService, shardStateAction, + threadPool) { @Override protected ClusterBlockLevel globalBlockLevel() { return null; @@ -287,7 +288,7 @@ public void testNotStartedPrimary() throws InterruptedException, ExecutionExcept transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); - assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); + assertThat(capturedRequests.get(0).action, equalTo("internal:testAction[p]")); assertIndexShardCounter(0); } @@ -339,7 +340,7 @@ public void testNoRerouteOnStaleClusterState() throws InterruptedException, Exec transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId); assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); - assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); + assertThat(capturedRequests.get(0).action, equalTo("internal:testAction[p]")); assertIndexShardCounter(0); } @@ -378,7 +379,7 @@ public void testClosedIndexOnReroute() throws InterruptedException { ReplicationTask task = maybeTask(); ClusterBlockLevel indexBlockLevel = randomBoolean() ? ClusterBlockLevel.WRITE : null; - TestAction action = new TestAction(Settings.EMPTY, "testActionWithBlocks", transportService, + TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithBlocks", transportService, clusterService, shardStateAction, threadPool) { @Override protected ClusterBlockLevel indexBlockLevel() { @@ -416,7 +417,7 @@ public void testStalePrimaryShardOnReroute() throws InterruptedException { reroutePhase.run(); CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests, arrayWithSize(1)); - assertThat(capturedRequests[0].action, equalTo("testAction[p]")); + assertThat(capturedRequests[0].action, equalTo("internal:testAction[p]")); assertPhase(task, "waiting_on_primary"); assertFalse(request.isRetrySet.get()); transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId)); @@ -427,7 +428,7 @@ public void testStalePrimaryShardOnReroute() throws InterruptedException { assertThat(listener.isDone(), equalTo(false)); capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests, arrayWithSize(1)); - assertThat(capturedRequests[0].action, equalTo("testAction[p]")); + assertThat(capturedRequests[0].action, equalTo("internal:testAction[p]")); assertPhase(task, "waiting_on_primary"); transport.handleRemoteError(capturedRequests[0].requestId, randomRetryPrimaryException(shardId)); assertListenerThrows("must throw index not found exception", listener, ElasticsearchException.class); @@ -438,7 +439,7 @@ public void testStalePrimaryShardOnReroute() throws InterruptedException { setState(clusterService, clusterService.state()); capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests, arrayWithSize(1)); - assertThat(capturedRequests[0].action, equalTo("testAction[p]")); + assertThat(capturedRequests[0].action, equalTo("internal:testAction[p]")); } } @@ -474,10 +475,10 @@ public void testRoutePhaseExecutesRequest() { assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); if (clusterService.state().nodes().getLocalNodeId().equals(primaryNodeId)) { - assertThat(capturedRequests.get(0).action, equalTo("testAction[p]")); + 
assertThat(capturedRequests.get(0).action, equalTo("internal:testAction[p]")); assertPhase(task, "waiting_on_primary"); } else { - assertThat(capturedRequests.get(0).action, equalTo("testAction")); + assertThat(capturedRequests.get(0).action, equalTo("internal:testAction")); assertPhase(task, "rerouted"); } assertFalse(request.isRetrySet.get()); @@ -531,7 +532,7 @@ public void execute() throws Exception { transport.capturedRequestsByTargetNode().get(primaryShard.relocatingNodeId()); assertThat(requests, notNullValue()); assertThat(requests.size(), equalTo(1)); - assertThat("primary request was not delegated to relocation target", requests.get(0).action, equalTo("testAction[p]")); + assertThat("primary request was not delegated to relocation target", requests.get(0).action, equalTo("internal:testAction[p]")); assertThat("primary term not properly set on primary delegation", ((TransportReplicationAction.ConcreteShardRequest)requests.get(0).request).getPrimaryTerm(), equalTo(primaryTerm)); assertPhase(task, "primary_delegation"); @@ -705,7 +706,8 @@ public void testSeqNoIsSetOnPrimary() throws Exception { }; TestAction action = - new TestAction(Settings.EMPTY, "testSeqNoIsSetOnPrimary", transportService, clusterService, shardStateAction, threadPool) { + new TestAction(Settings.EMPTY, "internal:testSeqNoIsSetOnPrimary", transportService, clusterService, shardStateAction, + threadPool) { @Override protected IndexShard getIndexShard(ShardId shardId) { return shard; @@ -788,8 +790,8 @@ public void testReplicasCounter() throws Exception { final ShardRouting replicaRouting = state.getRoutingTable().shardRoutingTable(shardId).replicaShards().get(0); boolean throwException = randomBoolean(); final ReplicationTask task = maybeTask(); - TestAction action = new TestAction(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction, - threadPool) { + TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithExceptions", transportService, clusterService, + shardStateAction, threadPool) { @Override protected ReplicaResult shardOperationOnReplica(Request request, IndexShard replica) { assertIndexShardCounter(1); @@ -924,8 +926,8 @@ public void testRetryOnReplica() throws Exception { setState(clusterService, state); AtomicBoolean throwException = new AtomicBoolean(true); final ReplicationTask task = maybeTask(); - TestAction action = new TestAction(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction, - threadPool) { + TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithExceptions", transportService, clusterService, + shardStateAction, threadPool) { @Override protected ReplicaResult shardOperationOnReplica(Request request, IndexShard replica) { assertPhase(task, "replica"); @@ -960,7 +962,7 @@ protected ReplicaResult shardOperationOnReplica(Request request, IndexShard repl assertThat(capturedRequests, notNullValue()); assertThat(capturedRequests.size(), equalTo(1)); final CapturingTransport.CapturedRequest capturedRequest = capturedRequests.get(0); - assertThat(capturedRequest.action, equalTo("testActionWithExceptions[r]")); + assertThat(capturedRequest.action, equalTo("internal:testActionWithExceptions[r]")); assertThat(capturedRequest.request, instanceOf(TransportReplicationAction.ConcreteReplicaRequest.class)); assertThat(((TransportReplicationAction.ConcreteReplicaRequest) capturedRequest.request).getGlobalCheckpoint(), equalTo(checkpoint)); @@ -988,8 +990,8 @@ public void 
testRetryOnReplicaWithRealTransport() throws Exception { transportService.acceptIncomingRequests(); AtomicBoolean calledSuccessfully = new AtomicBoolean(false); - TestAction action = new TestAction(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction, - threadPool) { + TestAction action = new TestAction(Settings.EMPTY, "internal:testActionWithExceptions", transportService, clusterService, + shardStateAction, threadPool) { @Override protected ReplicaResult shardOperationOnReplica(Request request, IndexShard replica) { assertPhase(task, "replica"); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index b894188dabef5..bfcc5938a8690 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; @@ -259,7 +260,7 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException { transportService.start(); transportService.acceptIncomingRequests(); ShardStateAction shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool); - TestAction action = new TestAction(Settings.EMPTY, "testAction", transportService, + TestAction action = new TestAction(Settings.EMPTY, "internal:testAction", transportService, clusterService, shardStateAction, threadPool); final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -355,10 +356,10 @@ protected TestAction() { } protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentFailureOnReplica) { - super(Settings.EMPTY, "test", - new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet()), null, - null, null, null, new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, + super(Settings.EMPTY, "internal:test", + new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, null, Collections.emptySet()), null, null, null, null, + new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), TestRequest::new, TestRequest::new, ThreadPool.Names.SAME); this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary; this.withDocumentFailureOnReplica = withDocumentFailureOnReplica; diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 9be0d55d77e6a..669a678b77e31 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import 
org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; @@ -34,13 +35,14 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportStats; import java.io.IOException; @@ -51,22 +53,22 @@ import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; abstract class FailAndRetryMockTransport implements Transport { private final Random random; private final ClusterName clusterName; + private volatile Map requestHandlers = Collections.emptyMap(); + final Object requestHandlerMutex = new Object(); + private final ResponseHandlers responseHandlers = new ResponseHandlers(); + private TransportConnectionListener listener; private boolean connectMode = true; - private TransportService transportService; - private final AtomicInteger connectTransportExceptions = new AtomicInteger(); private final AtomicInteger failures = new AtomicInteger(); private final AtomicInteger successes = new AtomicInteger(); private final Set triedNodes = new CopyOnWriteArraySet<>(); - private final AtomicLong requestId = new AtomicLong(); FailAndRetryMockTransport(Random random, ClusterName clusterName) { this.random = new Random(random.nextLong()); @@ -90,12 +92,12 @@ public void sendRequest(long requestId, String action, TransportRequest request, //we make sure that nodes get added to the connected ones when calling addTransportAddress, by returning proper nodes info if (connectMode) { if (TransportLivenessAction.NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = transportService.onResponseReceived(requestId); + TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); transportResponseHandler.handleResponse(new LivenessResponse(ClusterName.CLUSTER_NAME_SETTING. 
getDefault(Settings.EMPTY), node)); } else if (ClusterStateAction.NAME.equals(action)) { - TransportResponseHandler transportResponseHandler = transportService.onResponseReceived(requestId); + TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); ClusterState clusterState = getMockClusterState(node); transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState, 0L)); } else { @@ -116,7 +118,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, //throw whatever exception that is not a subclass of ConnectTransportException throw new IllegalStateException(); } else { - TransportResponseHandler transportResponseHandler = transportService.onResponseReceived(requestId); + TransportResponseHandler transportResponseHandler = responseHandlers.onResponseReceived(requestId, listener); if (random.nextBoolean()) { successes.incrementAndGet(); transportResponseHandler.handleResponse(newResponse()); @@ -162,10 +164,6 @@ public Set triedNodes() { return triedNodes; } - @Override - public void setTransportService(TransportService transportServiceAdapter) { - this.transportService = transportServiceAdapter; - } @Override public BoundTransportAddress boundAddress() { @@ -224,12 +222,36 @@ public Map profileBoundAddresses() { } @Override - public long newRequestId() { - return requestId.incrementAndGet(); + public TransportStats getStats() { + throw new UnsupportedOperationException(); } @Override - public TransportStats getStats() { + public void registerRequestHandler(RequestHandlerRegistry reg) { + synchronized (requestHandlerMutex) { + if (requestHandlers.containsKey(reg.getAction())) { + throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); + } + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + } + @Override + public ResponseHandlers getResponseHandlers() { + return responseHandlers; + } + + @Override + public RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); + } + + @Override + public void addConnectionListener(TransportConnectionListener listener) { + this.listener = listener; + } + + @Override + public boolean removeConnectionListener(TransportConnectionListener listener) { throw new UnsupportedOperationException(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 828b385f85fa5..eb93dad5db8a0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -35,7 +35,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; @@ -54,7 +56,6 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.Matchers.equalTo; @@ -171,12 +172,28 @@ 
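
FailAndRetryMockTransport's registerRequestHandler above (and CapturingTransport's, later in this patch) uses a copy-on-write map guarded by a mutex: writers republish an immutable copy, readers take a lock-free snapshot through the volatile field. The same idiom in plain Java, with Elasticsearch's MapBuilder helper swapped for the JDK equivalents:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    final class HandlerRegistry<T> {
        private volatile Map<String, T> handlers = Collections.emptyMap();
        private final Object mutex = new Object();

        void register(String action, T handler) {
            synchronized (mutex) {
                if (handlers.containsKey(action)) {
                    throw new IllegalArgumentException("handler for action " + action + " is already registered");
                }
                Map<String, T> copy = new HashMap<>(handlers);
                copy.put(action, handler);
                handlers = Collections.unmodifiableMap(copy); // volatile write publishes the new snapshot
            }
        }

        T get(String action) {
            return handlers.get(action); // lock-free read of the current snapshot
        }
    }
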
public void tearDown() throws Exception { } final class MockTransport implements Transport { - private final AtomicLong requestId = new AtomicLong(); Set connectedNodes = ConcurrentCollections.newConcurrentSet(); volatile boolean randomConnectionExceptions = false; + private ResponseHandlers responseHandlers = new ResponseHandlers(); + private TransportConnectionListener listener = new TransportConnectionListener() {}; @Override - public void setTransportService(TransportService service) { + public void registerRequestHandler(RequestHandlerRegistry reg) { + } + + @Override + public RequestHandlerRegistry getRequestHandler(String action) { + return null; + } + + @Override + public void addConnectionListener(TransportConnectionListener listener) { + this.listener = listener; + } + + @Override + public boolean removeConnectionListener(TransportConnectionListener listener) { + throw new UnsupportedOperationException(); } @Override @@ -208,12 +225,14 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil throw new ConnectTransportException(node, "simulated"); } connectedNodes.add(node); + listener.onNodeConnected(node); } } @Override public void disconnectFromNode(DiscoveryNode node) { connectedNodes.remove(node); + listener.onNodeDisconnected(node); } @Override @@ -226,20 +245,22 @@ public DiscoveryNode getNode() { @Override public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) - throws IOException, TransportException { + throws TransportException { } @Override - public void close() throws IOException { + public void close() { } }; } @Override - public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) throws IOException { - return getConnection(node); + public Connection openConnection(DiscoveryNode node, ConnectionProfile profile) { + Connection connection = getConnection(node); + listener.onConnectionOpened(connection); + return connection; } @Override @@ -247,11 +268,6 @@ public List getLocalAddresses() { return null; } - @Override - public long newRequestId() { - return requestId.incrementAndGet(); - } - @Override public Lifecycle.State lifecycleState() { return null; @@ -278,5 +294,10 @@ public void close() {} public TransportStats getStats() { throw new UnsupportedOperationException(); } + + @Override + public ResponseHandlers getResponseHandlers() { + return responseHandlers; + } } } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 0ab965fd522a3..1ea15321d5a0b 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -139,7 +139,7 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterSettings); Environment environment = TestEnvironment.newEnvironment(settings); - Transport transport = null; // it's not used + Transport transport = mock(Transport.class); // it's not used // mocks clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java 
b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 4a496167c80c1..cc971ed1b043a 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; @@ -399,7 +400,7 @@ private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNod when(threadPool.generic()).thenReturn(mock(ExecutorService.class)); final MockIndicesService indicesService = indicesServiceSupplier.get(); final Settings settings = Settings.builder().put("node.name", discoveryNode.getName()).build(); - final TransportService transportService = new TransportService(settings, null, threadPool, + final TransportService transportService = new TransportService(settings, mock(Transport.class), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, Collections.emptySet()); diff --git a/server/src/test/java/org/elasticsearch/transport/ActionNamesIT.java b/server/src/test/java/org/elasticsearch/transport/ActionNamesIT.java deleted file mode 100644 index 8ad8d42e5ef99..0000000000000 --- a/server/src/test/java/org/elasticsearch/transport/ActionNamesIT.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport; - -import org.elasticsearch.test.ESIntegTestCase; - -import static org.hamcrest.CoreMatchers.either; -import static org.hamcrest.CoreMatchers.startsWith; - -/** - * This test verifies that all of the action names follow our defined naming conventions. - * The identified categories are: - * - indices:admin: apis that allow to perform administration tasks against indices - * - indices:data: apis that are about data - * - indices:read: apis that read data - * - indices:write: apis that write data - * - cluster:admin: cluster apis that allow to perform administration tasks - * - cluster:monitor: cluster apis that allow to monitor the system - * - internal: internal actions that are used from node to node but not directly exposed to users - * - * Any transport action belongs to one of the above categories and its name starts with its category, followed by a '/' - * and the name of the api itself (e.g. cluster:admin/nodes/restart). 
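
The javadoc here, from the integration test this patch deletes, is the written statement of the action naming scheme; condensed into code it amounts to a prefix check. The prefix list below follows the assertions in the deleted test body; the class itself is illustrative only:

    import java.util.Arrays;
    import java.util.List;

    final class ActionNameConventions {
        // Prefix list as asserted by the deleted ActionNamesIT test.
        static final List<String> PREFIXES = Arrays.asList(
            "indices:admin", "indices:monitor", "indices:data/read", "indices:data/write",
            "cluster:admin", "cluster:monitor", "internal:");

        static boolean isValid(String action) {
            return PREFIXES.stream().anyMatch(action::startsWith);
        }
    }
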
- * When an api exposes multiple transport handlers, some of which are invoked internally during the execution of the api, - * we use the `[n]` suffix to identify node actions and the `[s]` suffix to identify shard actions. - */ -public class ActionNamesIT extends ESIntegTestCase { - public void testActionNamesCategories() throws NoSuchFieldException, IllegalAccessException { - TransportService transportService = internalCluster().getInstance(TransportService.class); - for (String action : transportService.requestHandlers.keySet()) { - assertThat("action doesn't belong to known category", action, - either(startsWith("indices:admin")).or(startsWith("indices:monitor")) - .or(startsWith("indices:data/read")).or(startsWith("indices:data/write")) - .or(startsWith("cluster:admin")).or(startsWith("cluster:monitor")) - .or(startsWith("internal:"))); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index 491ba123a451d..bccb1cc68aa0c 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -87,36 +87,36 @@ private MockTransportService buildService(final Version version) { public void testSendMessage() throws InterruptedException { - serviceA.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceA.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_A"; channel.sendResponse(response); }); - TransportActionProxy.registerProxyAction(serviceA, "/test", SimpleTestResponse::new); + TransportActionProxy.registerProxyAction(serviceA, "internal:test", SimpleTestResponse::new); serviceA.connectToNode(nodeB); - serviceB.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_B"; channel.sendResponse(response); }); - TransportActionProxy.registerProxyAction(serviceB, "/test", SimpleTestResponse::new); + TransportActionProxy.registerProxyAction(serviceB, "internal:test", SimpleTestResponse::new); serviceB.connectToNode(nodeC); - serviceC.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceC.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_C"; channel.sendResponse(response); }); - TransportActionProxy.registerProxyAction(serviceC, "/test", SimpleTestResponse::new); + TransportActionProxy.registerProxyAction(serviceC, "internal:test", SimpleTestResponse::new); CountDownLatch latch = new CountDownLatch(1); - serviceA.sendRequest(nodeB, TransportActionProxy.getProxyAction("/test"), TransportActionProxy.wrapRequest(nodeC, + serviceA.sendRequest(nodeB, TransportActionProxy.getProxyAction("internal:test"), TransportActionProxy.wrapRequest(nodeC, new SimpleTestRequest("TS_A")), new TransportResponseHandler() { @Override public SimpleTestResponse 
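
TransportActionProxyTests exercises a three-node relay: each node registers a handler and a proxy for "internal:test", node A asks node B to forward a wrapped request to node C, and the response travels back through B. The round trip reduces to a single send call, shown here with names taken from the test; the handler variable stands in for the TransportResponseHandler the test builds inline:

    // serviceA asks nodeB to forward the request to nodeC on its behalf.
    serviceA.sendRequest(nodeB, TransportActionProxy.getProxyAction("internal:test"),
        TransportActionProxy.wrapRequest(nodeC, new SimpleTestRequest("TS_A")), handler);
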
newInstance() { @@ -150,33 +150,33 @@ public String executor() { } public void testException() throws InterruptedException { - serviceA.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceA.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_A"; channel.sendResponse(response); }); - TransportActionProxy.registerProxyAction(serviceA, "/test", SimpleTestResponse::new); + TransportActionProxy.registerProxyAction(serviceA, "internal:test", SimpleTestResponse::new); serviceA.connectToNode(nodeB); - serviceB.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceB.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> { assertEquals(request.sourceNode, "TS_A"); SimpleTestResponse response = new SimpleTestResponse(); response.targetNode = "TS_B"; channel.sendResponse(response); }); - TransportActionProxy.registerProxyAction(serviceB, "/test", SimpleTestResponse::new); + TransportActionProxy.registerProxyAction(serviceB, "internal:test", SimpleTestResponse::new); serviceB.connectToNode(nodeC); - serviceC.registerRequestHandler("/test", SimpleTestRequest::new, ThreadPool.Names.SAME, + serviceC.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> { throw new ElasticsearchException("greetings from TS_C"); }); - TransportActionProxy.registerProxyAction(serviceC, "/test", SimpleTestResponse::new); + TransportActionProxy.registerProxyAction(serviceC, "internal:test", SimpleTestResponse::new); CountDownLatch latch = new CountDownLatch(1); - serviceA.sendRequest(nodeB, TransportActionProxy.getProxyAction("/test"), TransportActionProxy.wrapRequest(nodeC, + serviceA.sendRequest(nodeB, TransportActionProxy.getProxyAction("internal:test"), TransportActionProxy.wrapRequest(nodeC, new SimpleTestRequest("TS_A")), new TransportResponseHandler() { @Override public SimpleTestResponse newInstance() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index c63a1c9c6e68f..275bca4d28dd3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -104,6 +104,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -139,6 +140,7 @@ import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.node.NodeMocksPlugin; +import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptMetaData; @@ -152,6 +154,10 @@ import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; +import 
org.elasticsearch.transport.TransportInterceptor; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; import org.junit.After; import org.junit.AfterClass; @@ -2011,6 +2017,7 @@ protected Collection> getMockPlugins() { mocks.add(MockHttpTransport.TestPlugin.class); } mocks.add(TestSeedPlugin.class); + mocks.add(AssertActionNamePlugin.class); return Collections.unmodifiableList(mocks); } @@ -2021,6 +2028,25 @@ public List> getSettings() { } } + public static final class AssertActionNamePlugin extends Plugin implements NetworkPlugin { + @Override + public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, + ThreadContext threadContext) { + return Arrays.asList(new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler(String action, String executor, + boolean forceExecution, + TransportRequestHandler actualHandler) { + if (TransportService.isValidActionName(action) == false) { + throw new IllegalArgumentException("invalid action name [" + action + "] must start with one of: " + + TransportService.VALID_ACTION_PREFIXES ); + } + return actualHandler; + } + }); + } + } + /** * Returns the client ratio configured via */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 318c70c2933d8..ffdf79c0636b2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; @@ -33,13 +34,14 @@ import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.RequestHandlerRegistry; import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportStats; import java.io.IOException; @@ -54,14 +56,16 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLong; import static org.apache.lucene.util.LuceneTestCase.rarely; /** A transport class that doesn't send anything but rather captures all requests for inspection from tests */ public class CapturingTransport implements Transport { - private TransportService transportService; + private volatile Map requestHandlers = Collections.emptyMap(); + final Object requestHandlerMutex = new Object(); + private final ResponseHandlers 
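
With the back-reference to TransportService removed, CapturingTransport (whose diff begins just above) completes in-flight requests through the ResponseHandlers instance it now owns. A sketch of the typical test flow, assuming the ES test framework; the helper method is illustrative, not part of the patch:

    import org.elasticsearch.test.transport.CapturingTransport;
    import org.elasticsearch.transport.TransportResponse;

    class CapturingTransportUsage {
        // Illustrative helper: completes the first captured request with the given response.
        static void respondToFirst(CapturingTransport transport, TransportResponse response) {
            CapturingTransport.CapturedRequest[] captured = transport.getCapturedRequestsAndClear();
            // handleResponse resolves the registered TransportResponseHandler via the
            // transport-owned ResponseHandlers, mimicking a real wire round trip.
            transport.handleResponse(captured[0].requestId, response);
        }
    }
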
responseHandlers = new ResponseHandlers(); + private TransportConnectionListener listener; public static class CapturedRequest { public final DiscoveryNode node; @@ -79,8 +83,6 @@ public CapturedRequest(DiscoveryNode node, long requestId, String action, Transp private ConcurrentMap> requests = new ConcurrentHashMap<>(); private BlockingQueue capturedRequests = ConcurrentCollections.newBlockingQueue(); - private final AtomicLong requestId = new AtomicLong(); - /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */ public CapturedRequest[] capturedRequests() { @@ -137,7 +139,7 @@ public void clear() { /** simulate a response for the given requestId */ public void handleResponse(final long requestId, final TransportResponse response) { - transportService.onResponseReceived(requestId).handleResponse(response); + responseHandlers.onResponseReceived(requestId, listener).handleResponse(response); } /** @@ -189,7 +191,7 @@ public void handleRemoteError(final long requestId, final Throwable t) { * @param e the failure */ public void handleError(final long requestId, final TransportException e) { - transportService.onResponseReceived(requestId).handleException(e); + responseHandlers.onResponseReceived(requestId, listener).handleException(e); } @Override @@ -219,11 +221,6 @@ public TransportStats getStats() { throw new UnsupportedOperationException(); } - @Override - public void setTransportService(TransportService transportService) { - this.transportService = transportService; - } - @Override public BoundTransportAddress boundAddress() { return null; @@ -285,11 +282,6 @@ public List getLocalAddresses() { return Collections.emptyList(); } - @Override - public long newRequestId() { - return requestId.incrementAndGet(); - } - public Connection getConnection(DiscoveryNode node) { try { return openConnection(node, null); @@ -297,4 +289,40 @@ public Connection getConnection(DiscoveryNode node) { throw new UncheckedIOException(e); } } + + @Override + public void registerRequestHandler(RequestHandlerRegistry reg) { + synchronized (requestHandlerMutex) { + if (requestHandlers.containsKey(reg.getAction())) { + throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); + } + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + } + @Override + public ResponseHandlers getResponseHandlers() { + return responseHandlers; + } + + @Override + public RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); + } + + @Override + public void addConnectionListener(TransportConnectionListener listener) { + if (this.listener != null) { + throw new IllegalStateException("listener already set"); + } + this.listener = listener; + } + + @Override + public boolean removeConnectionListener(TransportConnectionListener listener) { + if (listener == this.listener) { + this.listener = null; + return true; + } + return false; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 6654444066d52..7f818de29d430 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -53,6 +53,7 @@ import org.elasticsearch.transport.RequestHandlerRegistry; import 
org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; @@ -72,7 +73,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; @@ -169,17 +169,6 @@ protected TaskManager createTaskManager(Settings settings, ThreadPool threadPool } } - private volatile String executorName; - - public void setExecutorName(final String executorName) { - this.executorName = executorName; - } - - @Override - protected ExecutorService getExecutorService() { - return executorName == null ? super.getExecutorService() : getThreadPool().executor(executorName); - } - /** * Clears all the registered rules. */ @@ -559,8 +548,23 @@ public DelegateTransport(Transport transport) { } @Override - public void setTransportService(TransportService service) { - transport.setTransportService(service); + public void addConnectionListener(TransportConnectionListener listener) { + transport.addConnectionListener(listener); + } + + @Override + public boolean removeConnectionListener(TransportConnectionListener listener) { + return transport.removeConnectionListener(listener); + } + + @Override + public void registerRequestHandler(RequestHandlerRegistry reg) { + transport.registerRequestHandler(reg); + } + + @Override + public RequestHandlerRegistry getRequestHandler(String action) { + return transport.getRequestHandler(action); } @Override @@ -595,11 +599,6 @@ public List getLocalAddresses() { return transport.getLocalAddresses(); } - @Override - public long newRequestId() { - return transport.newRequestId(); - } - @Override public Connection getConnection(DiscoveryNode node) { return new FilteredConnection(transport.getConnection(node)) { @@ -627,6 +626,11 @@ public TransportStats getStats() { return transport.getStats(); } + @Override + public ResponseHandlers getResponseHandlers() { + return transport.getResponseHandlers(); + } + @Override public Lifecycle.State lifecycleState() { return transport.lifecycleState(); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index dd8dd5f81ffc9..50b7b8ce57597 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -205,7 +205,7 @@ public void assertNoPendingHandshakes(Transport transport) { public void testHelloWorld() { - serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { assertThat("moshe", equalTo(request.message)); try { @@ -216,7 +216,7 @@ public void testHelloWorld() { } }); - TransportFuture res = serviceB.submitRequest(nodeA, "sayHello", + TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHello", new StringMessageRequest("moshe"), new TransportResponseHandler() { @Override public 
StringMessageResponse newInstance() { @@ -247,7 +247,7 @@ public void handleException(TransportException exp) { assertThat(e.getMessage(), false, equalTo(true)); } - res = serviceB.submitRequest(nodeA, "sayHello", new StringMessageRequest("moshe"), + res = serviceB.submitRequest(nodeA, "internal:sayHello", new StringMessageRequest("moshe"), TransportRequestOptions.builder().withCompress(true).build(), new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { @@ -281,7 +281,8 @@ public void handleException(TransportException exp) { public void testThreadContext() throws ExecutionException, InterruptedException { - serviceA.registerRequestHandler("ping_pong", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { + serviceA.registerRequestHandler("internal:ping_pong", StringMessageRequest::new, ThreadPool.Names.GENERIC, + (request, channel, task) -> { assertEquals("ping_user", threadPool.getThreadContext().getHeader("test.ping.user")); assertNull(threadPool.getThreadContext().getTransient("my_private_context")); try { @@ -325,7 +326,7 @@ public void handleException(TransportException exp) { threadPool.getThreadContext().putHeader("test.ping.user", "ping_user"); threadPool.getThreadContext().putTransient("my_private_context", context); - TransportFuture res = serviceB.submitRequest(nodeA, "ping_pong", ping, responseHandler); + TransportFuture res = serviceB.submitRequest(nodeA, "internal:ping_pong", ping, responseHandler); StringMessageResponse message = res.get(); assertThat("pong", equalTo(message.message)); @@ -339,7 +340,7 @@ public void testLocalNodeConnection() throws InterruptedException { // this should be a noop serviceA.disconnectFromNode(nodeA); final AtomicReference exception = new AtomicReference<>(); - serviceA.registerRequestHandler("localNode", StringMessageRequest::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:localNode", StringMessageRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { try { channel.sendResponse(new StringMessageResponse(request.message)); @@ -349,7 +350,8 @@ public void testLocalNodeConnection() throws InterruptedException { }); final AtomicReference responseString = new AtomicReference<>(); final CountDownLatch responseLatch = new CountDownLatch(1); - serviceA.sendRequest(nodeA, "localNode", new StringMessageRequest("test"), new TransportResponseHandler() { + serviceA.sendRequest(nodeA, "internal:localNode", new StringMessageRequest("test"), + new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -390,7 +392,7 @@ public void testAdapterSendReceiveCallbacks() throws Exception { fail(e.getMessage()); } }; - final String ACTION = "action"; + final String ACTION = "internal:action"; serviceA.registerRequestHandler(ACTION, TransportRequest.Empty::new, ThreadPool.Names.GENERIC, requestHandler); serviceB.registerRequestHandler(ACTION, TransportRequest.Empty::new, ThreadPool.Names.GENERIC, @@ -485,7 +487,7 @@ public void requestSent(DiscoveryNode node, long requestId, String action, Trans } public void testVoidMessageCompressed() { - serviceA.registerRequestHandler("sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, + serviceA.registerRequestHandler("internal:sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { try { TransportResponseOptions responseOptions = TransportResponseOptions.builder().withCompress(true).build(); @@ -496,7 
+498,7 @@ public void testVoidMessageCompressed() {
             }
         });

-        TransportFuture<TransportResponse.Empty> res = serviceB.submitRequest(nodeA, "sayHello",
+        TransportFuture<TransportResponse.Empty> res = serviceB.submitRequest(nodeA, "internal:sayHello",
             TransportRequest.Empty.INSTANCE, TransportRequestOptions.builder().withCompress(true).build(),
             new TransportResponseHandler<TransportResponse.Empty>() {
                 @Override
@@ -529,7 +531,7 @@ public void handleException(TransportException exp) {
     }

     public void testHelloWorldCompressed() {
-        serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC,
+        serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC,
             new TransportRequestHandler<StringMessageRequest>() {
                 @Override
                 public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) {
@@ -544,7 +546,7 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
             }
         });

-        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHello",
             new StringMessageRequest("moshe"), TransportRequestOptions.builder().withCompress(true).build(),
             new TransportResponseHandler<StringMessageResponse>() {
                 @Override
@@ -578,7 +580,7 @@ public void handleException(TransportException exp) {
     }

     public void testErrorMessage() {
-        serviceA.registerRequestHandler("sayHelloException", StringMessageRequest::new, ThreadPool.Names.GENERIC,
+        serviceA.registerRequestHandler("internal:sayHelloException", StringMessageRequest::new, ThreadPool.Names.GENERIC,
             new TransportRequestHandler<StringMessageRequest>() {
                 @Override
                 public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) throws Exception {
@@ -587,7 +589,7 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
             }
         });

-        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloException",
+        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHelloException",
             new StringMessageRequest("moshe"), new TransportResponseHandler<StringMessageResponse>() {
                 @Override
                 public StringMessageResponse newInstance() {
@@ -639,7 +641,7 @@ public void onNodeDisconnected(DiscoveryNode node) {
     public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierException, InterruptedException {
         Set<Exception> sendingErrors = ConcurrentCollections.newConcurrentSet();
         Set<Exception> responseErrors = ConcurrentCollections.newConcurrentSet();
-        serviceA.registerRequestHandler("test", TestRequest::new,
+        serviceA.registerRequestHandler("internal:test", TestRequest::new,
             randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, (request, channel, task) -> {
                 try {
                     channel.sendResponse(new TestResponse());
@@ -656,7 +658,7 @@ public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierExcepti
                 logger.trace("caught exception while responding from node B", e);
             }
         };
-        serviceB.registerRequestHandler("test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);
+        serviceB.registerRequestHandler("internal:test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);

         int halfSenders = scaledRandomIntBetween(3, 10);
         final CyclicBarrier go = new CyclicBarrier(halfSenders * 2 + 1);
@@ -712,7 +714,7 @@ protected void doRun() throws Exception {
                     final String info = sender + "_" + iter;
                     final DiscoveryNode node = nodeB; // capture now
                     try {
-                        serviceA.sendRequest(node, "test", new TestRequest(info),
+                        serviceA.sendRequest(node, "internal:test", new TestRequest(info),
                             new ActionListenerResponseHandler<>(listener, TestResponse::new));
                         try {
                             listener.actionGet();
@@ -742,7 +744,7 @@ public void onAfter() {
                 // simulate restart of nodeB
                 serviceB.close();
                 MockTransportService newService = buildService("TS_B_" + i, version1, null);
-                newService.registerRequestHandler("test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);
+                newService.registerRequestHandler("internal:test", TestRequest::new, ThreadPool.Names.SAME, ignoringRequestHandler);
                 serviceB = newService;
                 nodeB = newService.getLocalDiscoNode();
                 serviceB.connectToNode(nodeA);
@@ -763,7 +765,7 @@ public void onAfter() {
     public void testNotifyOnShutdown() throws Exception {
         final CountDownLatch latch2 = new CountDownLatch(1);
         try {
-            serviceA.registerRequestHandler("foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC,
+            serviceA.registerRequestHandler("internal:foobar", StringMessageRequest::new, ThreadPool.Names.GENERIC,
                 (request, channel, task) -> {
                     try {
                         latch2.await();
@@ -773,7 +775,7 @@ public void testNotifyOnShutdown() throws Exception {
                         fail(e.getMessage());
                     }
                 });
-            TransportFuture<TransportResponse.Empty> foobar = serviceB.submitRequest(nodeA, "foobar",
+            TransportFuture<TransportResponse.Empty> foobar = serviceB.submitRequest(nodeA, "internal:foobar",
                 new StringMessageRequest(""), TransportRequestOptions.EMPTY, EmptyTransportResponseHandler.INSTANCE_SAME);
             latch2.countDown();
             try {
@@ -789,7 +791,7 @@ public void testNotifyOnShutdown() throws Exception {
     }

     public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception {
-        serviceA.registerRequestHandler("sayHelloTimeoutNoResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC,
+        serviceA.registerRequestHandler("internal:sayHelloTimeoutNoResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC,
             new TransportRequestHandler<StringMessageRequest>() {
                 @Override
                 public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) {
@@ -798,7 +800,7 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
             }
         });

-        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutNoResponse",
+        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHelloTimeoutNoResponse",
             new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(),
             new TransportResponseHandler<StringMessageResponse>() {
                 @Override
@@ -834,7 +836,7 @@ public void testTimeoutSendExceptionWithDelayedResponse() throws Exception {
         CountDownLatch waitForever = new CountDownLatch(1);
         CountDownLatch doneWaitingForever = new CountDownLatch(1);
         Semaphore inFlight = new Semaphore(Integer.MAX_VALUE);
-        serviceA.registerRequestHandler("sayHelloTimeoutDelayedResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC,
+        serviceA.registerRequestHandler("internal:sayHelloTimeoutDelayedResponse", StringMessageRequest::new, ThreadPool.Names.GENERIC,
             new TransportRequestHandler<StringMessageRequest>() {
                 @Override
                 public void messageReceived(StringMessageRequest request, TransportChannel channel, Task task) throws InterruptedException {
@@ -862,7 +864,7 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
             }
         });
         final CountDownLatch latch = new CountDownLatch(1);
-        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHelloTimeoutDelayedResponse",
             new StringMessageRequest("forever"), TransportRequestOptions.builder().withTimeout(100).build(),
             new TransportResponseHandler<StringMessageResponse>() {
                 @Override
@@ -900,7 +902,7 @@ public void handleException(TransportException exp) {
         for (int i = 0; i < 10; i++) {
             final int counter = i;
             // now, try and send another request, this times, with a short timeout
-            TransportFuture<StringMessageResponse> result = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+            TransportFuture<StringMessageResponse> result = serviceB.submitRequest(nodeA, "internal:sayHelloTimeoutDelayedResponse",
                 new StringMessageRequest(counter + "ms"), TransportRequestOptions.builder().withTimeout(3000).build(),
                 new TransportResponseHandler<StringMessageResponse>() {
                     @Override
@@ -975,12 +977,12 @@ public String executor() {
             }
         };

-        serviceA.registerRequestHandler("test", StringMessageRequest::new, ThreadPool.Names.SAME, handler);
-        serviceA.registerRequestHandler("testError", StringMessageRequest::new, ThreadPool.Names.SAME, handlerWithError);
-        serviceB.registerRequestHandler("test", StringMessageRequest::new, ThreadPool.Names.SAME, handler);
-        serviceB.registerRequestHandler("testError", StringMessageRequest::new, ThreadPool.Names.SAME, handlerWithError);
+        serviceA.registerRequestHandler("internal:test", StringMessageRequest::new, ThreadPool.Names.SAME, handler);
+        serviceA.registerRequestHandler("internal:testError", StringMessageRequest::new, ThreadPool.Names.SAME, handlerWithError);
+        serviceB.registerRequestHandler("internal:test", StringMessageRequest::new, ThreadPool.Names.SAME, handler);
+        serviceB.registerRequestHandler("internal:testError", StringMessageRequest::new, ThreadPool.Names.SAME, handlerWithError);

-        final Tracer tracer = new Tracer(new HashSet<>(Arrays.asList("test", "testError")));
+        final Tracer tracer = new Tracer(new HashSet<>(Arrays.asList("internal:test", "internal:testError")));
         // the tracer is invoked concurrently after the actual action is executed. that means a Tracer#requestSent can still be in-flight
         // from a handshake executed on connect in the setup method. this might confuse this test since it expects exact number of
         // invocations. To prevent any unrelated events messing with this test we filter on the actions we execute in this test.
@@ -991,7 +993,7 @@ public String executor() {
         boolean timeout = randomBoolean();
         TransportRequestOptions options = timeout ? TransportRequestOptions.builder().withTimeout(1).build() :
             TransportRequestOptions.EMPTY;
-        serviceA.sendRequest(nodeB, "test", new StringMessageRequest("", 10), options, noopResponseHandler);
+        serviceA.sendRequest(nodeB, "internal:test", new StringMessageRequest("", 10), options, noopResponseHandler);
         requestCompleted.acquire();
         tracer.expectedEvents.get().await();
         assertThat("didn't see request sent", tracer.sawRequestSent, equalTo(true));
@@ -1001,7 +1003,7 @@ public String executor() {
         assertThat("saw error sent", tracer.sawErrorSent, equalTo(false));

         tracer.reset(4);
-        serviceA.sendRequest(nodeB, "testError", new StringMessageRequest(""), noopResponseHandler);
+        serviceA.sendRequest(nodeB, "internal:testError", new StringMessageRequest(""), noopResponseHandler);
         requestCompleted.acquire();
         tracer.expectedEvents.get().await();
         assertThat("didn't see request sent", tracer.sawRequestSent, equalTo(true));
@@ -1017,7 +1019,7 @@ public String executor() {
             includeSettings = randomBoolean() ? "*" : "";
             excludeSettings = "*Error";
         } else {
-            includeSettings = "test";
+            includeSettings = "internal:test";
             excludeSettings = "DOESN'T_MATCH";
         }
         clusterSettings.applySettings(Settings.builder()
@@ -1026,7 +1028,7 @@ public String executor() {
             .build());

         tracer.reset(4);
-        serviceA.sendRequest(nodeB, "test", new StringMessageRequest(""), noopResponseHandler);
+        serviceA.sendRequest(nodeB, "internal:test", new StringMessageRequest(""), noopResponseHandler);
         requestCompleted.acquire();
         tracer.expectedEvents.get().await();
         assertThat("didn't see request sent", tracer.sawRequestSent, equalTo(true));
@@ -1036,7 +1038,7 @@ public String executor() {
         assertThat("saw error sent", tracer.sawErrorSent, equalTo(false));

         tracer.reset(2);
-        serviceA.sendRequest(nodeB, "testError", new StringMessageRequest(""), noopResponseHandler);
+        serviceA.sendRequest(nodeB, "internal:testError", new StringMessageRequest(""), noopResponseHandler);
         requestCompleted.acquire();
         tracer.expectedEvents.get().await();
         assertThat("saw request sent", tracer.sawRequestSent, equalTo(false));
@@ -1255,7 +1257,7 @@ public void writeTo(StreamOutput out) throws IOException {
     }

     public void testVersionFrom0to1() throws Exception {
-        serviceB.registerRequestHandler("/version", Version1Request::new, ThreadPool.Names.SAME,
+        serviceB.registerRequestHandler("internal:version", Version1Request::new, ThreadPool.Names.SAME,
             new TransportRequestHandler<Version1Request>() {
                 @Override
                 public void messageReceived(Version1Request request, TransportChannel channel, Task task) throws Exception {
@@ -1271,7 +1273,7 @@ public void messageReceived(Version1Request request, TransportChannel channel, T

         Version0Request version0Request = new Version0Request();
         version0Request.value1 = 1;
-        Version0Response version0Response = serviceA.submitRequest(nodeB, "/version", version0Request,
+        Version0Response version0Response = serviceA.submitRequest(nodeB, "internal:version", version0Request,
             new TransportResponseHandler<Version0Response>() {
                 @Override
                 public Version0Response newInstance() {
@@ -1299,7 +1301,7 @@ public String executor() {
     }

     public void testVersionFrom1to0() throws Exception {
-        serviceA.registerRequestHandler("/version", Version0Request::new, ThreadPool.Names.SAME,
+        serviceA.registerRequestHandler("internal:version", Version0Request::new, ThreadPool.Names.SAME,
             new TransportRequestHandler<Version0Request>() {
                 @Override
                 public void messageReceived(Version0Request request, TransportChannel channel, Task task) throws Exception {
@@ -1314,7 +1316,7 @@ public void messageReceived(Version0Request request, TransportChannel channel, T

         Version1Request version1Request = new Version1Request();
         version1Request.value1 = 1;
         version1Request.value2 = 2;
-        Version1Response version1Response = serviceB.submitRequest(nodeA, "/version", version1Request,
+        Version1Response version1Response = serviceB.submitRequest(nodeA, "internal:version", version1Request,
             new TransportResponseHandler<Version1Response>() {
                 @Override
                 public Version1Response newInstance() {
@@ -1344,7 +1346,7 @@ public String executor() {
     }

     public void testVersionFrom1to1() throws Exception {
-        serviceB.registerRequestHandler("/version", Version1Request::new, ThreadPool.Names.SAME,
+        serviceB.registerRequestHandler("internal:version", Version1Request::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertThat(request.value1, equalTo(1));
                 assertThat(request.value2, equalTo(2));
@@ -1358,7 +1360,7 @@ public void testVersionFrom1to1() throws Exception {
         Version1Request version1Request = new Version1Request();
         version1Request.value1 = 1;
         version1Request.value2 = 2;
-        Version1Response version1Response = serviceB.submitRequest(nodeB, "/version", version1Request,
+        Version1Response version1Response = serviceB.submitRequest(nodeB, "internal:version", version1Request,
             new TransportResponseHandler<Version1Response>() {
                 @Override
                 public Version1Response newInstance() {
@@ -1388,7 +1390,7 @@ public String executor() {
     }

     public void testVersionFrom0to0() throws Exception {
-        serviceA.registerRequestHandler("/version", Version0Request::new, ThreadPool.Names.SAME,
+        serviceA.registerRequestHandler("internal:version", Version0Request::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertThat(request.value1, equalTo(1));
                 Version0Response response = new Version0Response();
@@ -1399,7 +1401,7 @@ public void testVersionFrom0to0() throws Exception {

         Version0Request version0Request = new Version0Request();
         version0Request.value1 = 1;
-        Version0Response version0Response = serviceA.submitRequest(nodeA, "/version", version0Request,
+        Version0Response version0Response = serviceA.submitRequest(nodeA, "internal:version", version0Request,
             new TransportResponseHandler<Version0Response>() {
                 @Override
                 public Version0Response newInstance() {
@@ -1427,7 +1429,7 @@ public String executor() {
     }

     public void testMockFailToSendNoConnectRule() throws Exception {
-        serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC,
+        serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC,
             (request, channel, task) -> {
                 assertThat("moshe", equalTo(request.message));
                 throw new RuntimeException("bad message !!!");
@@ -1435,7 +1437,7 @@ public void testMockFailToSendNoConnectRule() throws Exception {

         serviceB.addFailToSendNoConnectRule(serviceA);

-        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHello",
             new StringMessageRequest("moshe"), new TransportResponseHandler<StringMessageResponse>() {
                 @Override
                 public StringMessageResponse newInstance() {
@@ -1484,7 +1486,7 @@ public void handleException(TransportException exp) {
     }

     public void testMockUnresponsiveRule() throws IOException {
-        serviceA.registerRequestHandler("sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC,
+        serviceA.registerRequestHandler("internal:sayHello", StringMessageRequest::new, ThreadPool.Names.GENERIC,
             (request, channel, task) -> {
                 assertThat("moshe", equalTo(request.message));
                 throw new RuntimeException("bad message !!!");
@@ -1492,7 +1494,7 @@ public void testMockUnresponsiveRule() throws IOException {

         serviceB.addUnresponsiveRule(serviceA);

-        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+        TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHello",
             new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(),
             new TransportResponseHandler<StringMessageResponse>() {
                 @Override
@@ -1539,15 +1541,12 @@ public void testHostOnMessages() throws InterruptedException {
         final CountDownLatch latch = new CountDownLatch(2);
         final AtomicReference<TransportAddress> addressA = new AtomicReference<>();
         final AtomicReference<TransportAddress> addressB = new AtomicReference<>();
-        serviceB.registerRequestHandler("action1", TestRequest::new, ThreadPool.Names.SAME, new TransportRequestHandler<TestRequest>() {
-            @Override
-            public void messageReceived(TestRequest request, TransportChannel channel, Task task) throws Exception {
-                addressA.set(request.remoteAddress());
-                channel.sendResponse(new TestResponse());
-                latch.countDown();
-            }
+        serviceB.registerRequestHandler("internal:action1", TestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> {
+            addressA.set(request.remoteAddress());
+            channel.sendResponse(new TestResponse());
+            latch.countDown();
         });
-        serviceA.sendRequest(nodeB, "action1", new TestRequest(), new TransportResponseHandler<TestResponse>() {
+        serviceA.sendRequest(nodeB, "internal:action1", new TestRequest(), new TransportResponseHandler<TestResponse>() {
             @Override
             public TestResponse newInstance() {
                 return new TestResponse();
             }
@@ -1582,7 +1581,7 @@ public void testBlockingIncomingRequests() throws Exception {

         try (TransportService service = buildService("TS_TEST", version0, null, Settings.EMPTY, false, false)) {
             AtomicBoolean requestProcessed = new AtomicBoolean(false);
-            service.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME,
+            service.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME,
                 (request, channel, task) -> {
                     requestProcessed.set(true);
                     channel.sendResponse(TransportResponse.Empty.INSTANCE);
@@ -1594,7 +1593,7 @@ public void testBlockingIncomingRequests() throws Exception {
                 Settings.EMPTY, true, false);
             try (Transport.Connection connection = serviceA.openConnection(node, null)) {
                 CountDownLatch latch = new CountDownLatch(1);
-                serviceA.sendRequest(connection, "action", new TestRequest(), TransportRequestOptions.EMPTY,
+                serviceA.sendRequest(connection, "internal:action", new TestRequest(), TransportRequestOptions.EMPTY,
                     new TransportResponseHandler<TestResponse>() {
                         @Override
                         public TestResponse newInstance() {
@@ -1756,7 +1755,7 @@ public void messageReceived(TestRequest request, TransportChannel channel, Task
                 if (randomBoolean() && request.resendCount++ < 20) {
                     DiscoveryNode node = randomFrom(nodeA, nodeB, nodeC);
                     logger.debug("send secondary request from {} to {} - {}", toNodeMap.get(service), node, request.info);
-                    service.sendRequest(node, "action1", new TestRequest("secondary " + request.info),
+                    service.sendRequest(node, "internal:action1", new TestRequest("secondary " + request.info),
                         TransportRequestOptions.builder().withCompress(randomBoolean()).build(),
                         new TransportResponseHandler<TestResponse>() {
                             @Override
@@ -1800,11 +1799,11 @@ public String executor() {
             }
         }

-        serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+        serviceB.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
             new TestRequestHandler(serviceB));
-        serviceC.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+        serviceC.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
             new TestRequestHandler(serviceC));
-        serviceA.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+        serviceA.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
             new TestRequestHandler(serviceA));
         int iters = randomIntBetween(30, 60);
         CountDownLatch allRequestsDone = new CountDownLatch(iters);
@@ -1847,7 +1846,7 @@ public String executor() {
             TransportService service = randomFrom(serviceC, serviceB, serviceA);
             DiscoveryNode node = randomFrom(nodeC, nodeB, nodeA);
             logger.debug("send from {} to {}", toNodeMap.get(service), node);
-            service.sendRequest(node, "action1", new TestRequest("REQ[" + i + "]"),
+            service.sendRequest(node, "internal:action1", new TestRequest("REQ[" + i + "]"),
                 TransportRequestOptions.builder().withCompress(randomBoolean()).build(), new TestResponseHandler(i));
         }
         logger.debug("waiting for response");
@@ -1868,18 +1867,19 @@ public String executor() {
     }

     public void testRegisterHandlerTwice() {
-        serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+        serviceB.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
             (request, message, task) -> {
                 throw new AssertionError("boom");
             });
         expectThrows(IllegalArgumentException.class, () ->
-            serviceB.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+            serviceB.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME,
+                ThreadPool.Names.GENERIC),
                 (request, message, task) -> {
                     throw new AssertionError("boom");
                 })
         );

-        serviceA.registerRequestHandler("action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
+        serviceA.registerRequestHandler("internal:action1", TestRequest::new, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC),
             (request, message, task) -> {
                 throw new AssertionError("boom");
             });
@@ -2066,7 +2066,7 @@ public void run() {

     public void testResponseHeadersArePreserved() throws InterruptedException {
         List<String> executors = new ArrayList<>(ThreadPool.THREAD_POOL_TYPES.keySet());
         CollectionUtil.timSort(executors); // makes sure it's reproducible
-        serviceA.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME,
+        serviceA.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 threadPool.getThreadContext().putTransient("boom", new Object());
@@ -2118,8 +2118,8 @@ public String executor() {
             }
         };

-        serviceB.sendRequest(nodeA, "action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler);
-        serviceA.sendRequest(nodeA, "action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler);
+        serviceB.sendRequest(nodeA, "internal:action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler);
+        serviceA.sendRequest(nodeA, "internal:action", new TestRequest(randomFrom("fail", "pass")), transportResponseHandler);
         latch.await();
     }
@@ -2127,7 +2127,7 @@ public void testHandlerIsInvokedOnConnectionClose() throws IOException, Interrup
         List<String> executors = new ArrayList<>(ThreadPool.THREAD_POOL_TYPES.keySet());
         CollectionUtil.timSort(executors); // makes sure it's reproducible
         TransportService serviceC = build(Settings.builder().put("name", "TS_TEST").build(), version0, null, true);
-        serviceC.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME,
+        serviceC.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 // do nothing
             });
@@ -2177,7 +2177,7 @@ public String executor() {
             TransportRequestOptions.Type.STATE);
         try (Transport.Connection connection = serviceB.openConnection(serviceC.getLocalNode(), builder.build())) {
             serviceC.close();
-            serviceB.sendRequest(connection, "action", new TestRequest("boom"), TransportRequestOptions.EMPTY,
+            serviceB.sendRequest(connection, "internal:action", new TestRequest("boom"), TransportRequestOptions.EMPTY,
                 transportResponseHandler);
         }
         latch.await();
@@ -2187,7 +2187,7 @@ public void testConcurrentDisconnectOnNonPublishedConnection() throws IOExceptio
         MockTransportService serviceC = build(Settings.builder().put("name", "TS_TEST").build(), version0, null, true);
         CountDownLatch receivedLatch = new CountDownLatch(1);
         CountDownLatch sendResponseLatch = new CountDownLatch(1);
-        serviceC.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME,
+        serviceC.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 // don't block on a network thread here
                 threadPool.generic().execute(new AbstractRunnable() {
@@ -2242,7 +2242,7 @@ public String executor() {
             TransportRequestOptions.Type.STATE);
         try (Transport.Connection connection = serviceB.openConnection(serviceC.getLocalNode(), builder.build())) {
-            serviceB.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY,
+            serviceB.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY,
                 transportResponseHandler);
             receivedLatch.await();
             serviceC.close();
@@ -2255,7 +2255,7 @@ public void testTransportStats() throws Exception {
         MockTransportService serviceC = build(Settings.builder().put("name", "TS_TEST").build(), version0, null, true);
         CountDownLatch receivedLatch = new CountDownLatch(1);
         CountDownLatch sendResponseLatch = new CountDownLatch(1);
-        serviceB.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME,
+        serviceB.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 // don't block on a network thread here
                 threadPool.generic().execute(new AbstractRunnable() {
@@ -2322,7 +2322,7 @@ public String executor() {
                 assertEquals(25, transportStats.getRxSize().getBytes());
                 assertEquals(45, transportStats.getTxSize().getBytes());
             });
-            serviceC.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY,
+            serviceC.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY,
                 transportResponseHandler);
             receivedLatch.await();
             assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here
@@ -2330,7 +2330,7 @@ public String executor() {
                 assertEquals(1, transportStats.getRxCount());
                 assertEquals(2, transportStats.getTxCount());
                 assertEquals(25, transportStats.getRxSize().getBytes());
-                assertEquals(92, transportStats.getTxSize().getBytes());
+                assertEquals(101, transportStats.getTxSize().getBytes());
             });
             sendResponseLatch.countDown();
             responseLatch.await();
@@ -2338,7 +2338,7 @@ public String executor() {
             assertEquals(2, stats.getRxCount());
             assertEquals(2, stats.getTxCount());
             assertEquals(46, stats.getRxSize().getBytes());
-            assertEquals(92, stats.getTxSize().getBytes());
+            assertEquals(101, stats.getTxSize().getBytes());
         } finally {
             serviceC.close();
         }
@@ -2368,7 +2368,7 @@ public void testTransportStatsWithException() throws Exception {
         CountDownLatch sendResponseLatch = new CountDownLatch(1);
         Exception ex = new RuntimeException("boom");
         ex.setStackTrace(new StackTraceElement[0]);
-        serviceB.registerRequestHandler("action", TestRequest::new, ThreadPool.Names.SAME,
+        serviceB.registerRequestHandler("internal:action", TestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 // don't block on a network thread here
                 threadPool.generic().execute(new AbstractRunnable() {
@@ -2437,7 +2437,7 @@ public String executor() {
                 assertEquals(25, transportStats.getRxSize().getBytes());
                 assertEquals(45, transportStats.getTxSize().getBytes());
             });
-            serviceC.sendRequest(connection, "action", new TestRequest("hello world"), TransportRequestOptions.EMPTY,
+            serviceC.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY,
                 transportResponseHandler);
             receivedLatch.await();
             assertBusy(() -> { // netty for instance invokes this concurrently so we better use assert busy here
@@ -2445,7 +2445,7 @@ public String executor() {
                 assertEquals(1, transportStats.getRxCount());
                 assertEquals(2, transportStats.getTxCount());
                 assertEquals(25, transportStats.getRxSize().getBytes());
-                assertEquals(92, transportStats.getTxSize().getBytes());
+                assertEquals(101, transportStats.getTxSize().getBytes());
             });
             sendResponseLatch.countDown();
             responseLatch.await();
@@ -2460,7 +2460,7 @@ public String executor() {
             // 49 bytes are the non-exception message bytes that have been received. It should include the initial
             // handshake message and the header, version, etc bytes in the exception message.
             assertEquals(failedMessage, 49 + streamOutput.bytes().length(), stats.getRxSize().getBytes());
-            assertEquals(92, stats.getTxSize().getBytes());
+            assertEquals(101, stats.getTxSize().getBytes());
         } finally {
             serviceC.close();
         }
@@ -2638,10 +2638,9 @@ public void testProfilesIncludesDefault() {
             .toSet()));
     }

-    public void testChannelCloseWhileConnecting() throws IOException {
+    public void testChannelCloseWhileConnecting() {
         try (MockTransportService service = build(Settings.builder().put("name", "close").build(), version0, null, true)) {
-            service.setExecutorName(ThreadPool.Names.SAME); // make sure stuff is executed in a blocking fashion
-            service.addConnectionListener(new TransportConnectionListener() {
+            service.transport.addConnectionListener(new TransportConnectionListener() {
                 @Override
                 public void onConnectionOpened(final Transport.Connection connection) {
                     try {
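Note on the renames above: every test action in this file moves from a bare name ("sayHello", "test", "/version", ...) to an "internal:"-prefixed one. This matches the Elasticsearch convention that transport action names carry a recognised category prefix (for example "internal:", "indices:" or "cluster:"), which registration can then enforce. A minimal sketch of that kind of prefix check, with requireValidActionName as a hypothetical stand-in for the validation that TransportService can apply when a handler is registered:

    import java.util.Arrays;
    import java.util.List;

    final class ActionNamePrefixes {
        // Assumed prefix list, for illustration only; the authoritative set lives in TransportService.
        private static final List<String> VALID_PREFIXES = Arrays.asList("internal:", "indices:", "cluster:");

        // Hypothetical helper: reject action names that carry no recognised prefix.
        static void requireValidActionName(String action) {
            if (VALID_PREFIXES.stream().noneMatch(action::startsWith)) {
                throw new IllegalArgumentException("invalid action name [" + action + "]");
            }
        }
    }

Under such a check the old bare names would be rejected, which is why every registerRequestHandler, sendRequest and submitRequest call in this file is renamed in lockstep.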
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java
index 01991670d5565..2ee6716262f03 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/action/TransportXPackInfoActionTests.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.license.XPackInfoResponse.FeatureSetsInfo.FeatureSet;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.XPackFeatureSet;
@@ -52,8 +53,8 @@ public void testDoExecute() throws Exception {
             featureSets.add(fs);
         }

-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportXPackInfoAction action = new TransportXPackInfoAction(Settings.EMPTY, transportService, mock(ActionFilters.class),
             licenseService, featureSets);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java
index 675c438b87fa2..c8238ab49b146 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java
@@ -5,8 +5,6 @@
  */
 package org.elasticsearch.transport;

-import java.util.Map;
-
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.SecurityIntegTestCase;
 import org.elasticsearch.xpack.core.XPackSettings;
@@ -24,15 +22,13 @@ protected Settings transportClientSettings() {

     public void testSecurityServerTransportServiceWrapsAllHandlers() {
         for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
-            for (Map.Entry<String, RequestHandlerRegistry> entry : transportService.requestHandlers.entrySet()) {
-                RequestHandlerRegistry handler = entry.getValue();
-                assertEquals(
-                    "handler not wrapped by " + SecurityServerTransportInterceptor.ProfileSecuredRequestHandler.class +
-                        "; do all the handler registration methods have overrides?",
-                    handler.toString(),
-                    "ProfileSecuredRequestHandler{action='" + handler.getAction() + "', executorName='" + handler.getExecutor()
-                        + "', forceExecution=" + handler.isForceExecution() + "}");
-            }
+            RequestHandlerRegistry handler = transportService.transport.getRequestHandler(TransportService.HANDSHAKE_ACTION_NAME);
+            assertEquals(
+                "handler not wrapped by " + SecurityServerTransportInterceptor.ProfileSecuredRequestHandler.class +
+                    "; do all the handler registration methods have overrides?",
+                handler.toString(),
+                "ProfileSecuredRequestHandler{action='" + handler.getAction() + "', executorName='" + handler.getExecutor()
+                    + "', forceExecution=" + handler.isForceExecution() + "}");
         }
     }
 }
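The x-pack test diffs that follow all repeat one edit: a TransportService that used to be built with a null Transport now gets a Mockito mock. A condensed sketch of the resulting test wiring, mirroring the constructor arguments used in these hunks (a mocked Transport, no ThreadPool, the no-op interceptor, no local-node factory, no ClusterSettings, no task headers); TestTransportServiceFactory is a hypothetical helper name, not part of the patch:

    import java.util.Collections;

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.transport.Transport;
    import org.elasticsearch.transport.TransportService;

    import static org.mockito.Mockito.mock;

    final class TestTransportServiceFactory {
        // A Mockito mock answers unstubbed calls with benign defaults (null, 0, false),
        // so the TransportService can touch its Transport without a NullPointerException.
        static TransportService newTestTransportService(Settings settings) {
            return new TransportService(settings, mock(Transport.class), null,
                TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
        }
    }

The mock is needed presumably because this change moves request-handler bookkeeping down into Transport (see transportService.transport.getRequestHandler(...) in the test above), so a null transport would no longer do.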
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java
index 572e948b26e80..94856f701fa05 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportDeleteRoleActionTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest;
 import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse;
@@ -42,8 +43,8 @@ public class TransportDeleteRoleActionTests extends ESTestCase {
     public void testReservedRole() {
         final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names()));
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            (x) -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, Collections.emptySet());
         TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore,
             transportService);
@@ -73,8 +74,8 @@ public void onFailure(Exception e) {
     public void testValidRole() {
         final String roleName = randomFrom("admin", "dept_a", "restricted");
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            (x) -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, Collections.emptySet());
         TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore,
             transportService);
@@ -117,8 +118,8 @@ public void testException() {
         final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException());
         final String roleName = randomFrom("admin", "dept_a", "restricted");
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            (x) -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, (x) -> null, null, Collections.emptySet());
         TransportDeleteRoleAction action = new TransportDeleteRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore,
             transportService);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java
index 672a24eb45d39..eecf3f0202f32 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesActionTests.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.role.GetRolesRequest;
 import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse;
@@ -42,8 +43,8 @@ public class TransportGetRolesActionTests extends ESTestCase {

     public void testReservedRoles() {
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore,
             transportService, new ReservedRolesStore());
@@ -88,8 +89,8 @@ public void onFailure(Exception e) {
     public void testStoreRoles() {
         final List<RoleDescriptor> storeRoleDescriptors = randomRoleDescriptors();
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore,
             transportService, new ReservedRolesStore());
@@ -140,8 +141,8 @@ public void testGetAllOrMix() {
         }

         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore,
             transportService, new ReservedRolesStore());
@@ -204,8 +205,8 @@ public void testException() {
         final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException());
         final List<RoleDescriptor> storeRoleDescriptors = randomRoleDescriptors();
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetRolesAction action = new TransportGetRolesAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore,
             transportService, new ReservedRolesStore());
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java
index eb606314788c9..1634462b27dee 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/TransportPutRoleActionTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest;
 import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse;
@@ -43,8 +44,8 @@ public class TransportPutRoleActionTests extends ESTestCase {
     public void testReservedRole() {
         final String roleName = randomFrom(new ArrayList<>(ReservedRolesStore.names()));
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore, transportService);

         PutRoleRequest request = new PutRoleRequest();
@@ -73,8 +74,8 @@ public void onFailure(Exception e) {
     public void testValidRole() {
         final String roleName = randomFrom("admin", "dept_a", "restricted");
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore, transportService);

         final boolean created = randomBoolean();
@@ -116,8 +117,8 @@ public void testException() {
         final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException());
         final String roleName = randomFrom("admin", "dept_a", "restricted");
         NativeRolesStore rolesStore = mock(NativeRolesStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportPutRoleAction action = new TransportPutRoleAction(Settings.EMPTY, mock(ActionFilters.class), rolesStore, transportService);

         PutRoleRequest request = new PutRoleRequest();
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java
index d10020cd78b3f..67df9013e752e 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest;
 import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse;
@@ -44,7 +45,7 @@ public class TransportGetRoleMappingsActionTests extends ESTestCase {
     @Before
     public void setupMocks() {
         store = mock(NativeRoleMappingStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null,
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
             TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         action = new TransportGetRoleMappingsAction(Settings.EMPTY, mock(ActionFilters.class),
             transportService, store);
@@ -110,4 +111,4 @@ public void testGetAllRoles() throws Exception {
         assertThat(namesRef.get(), Matchers.nullValue(Set.class));
     }

-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java
index 68a957c9c3c14..d3a0cd2e9c715 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest;
 import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse;
@@ -40,7 +41,7 @@ public class TransportPutRoleMappingActionTests extends ESTestCase {
     @Before
     public void setupMocks() {
         store = mock(NativeRoleMappingStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null,
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
             TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         action = new TransportPutRoleMappingAction(Settings.EMPTY, mock(ActionFilters.class),
             transportService, store);
@@ -88,4 +89,4 @@ private PutRoleMappingResponse put(String name, FieldExpression expression, Stri
         action.doExecute(mock(Task.class), request, future);
         return future.get();
     }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
index bec6038b65580..158a74308d97f 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
@@ -46,6 +46,7 @@
 import org.elasticsearch.test.ClusterServiceUtils;
 import org.elasticsearch.test.client.NoOpClient;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.core.security.action.saml.SamlInvalidateSessionRequest;
@@ -166,7 +167,7 @@ void doExecute(Action action, Request request, ActionListener
-        final TransportService transportService = new TransportService(Settings.EMPTY, null, null,
+        final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
             TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         final Realms realms = mock(Realms.class);
         action = new TransportSamlInvalidateSessionAction(settings, transportService, mock(ActionFilters.class),tokenService, realms);
@@ -319,4 +320,4 @@ private Tuple storeToken(SamlNameId nameId, String session) t
         return future.actionGet();
     }

-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
index 1185fa29986b0..37cbb5ef27940 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
@@ -37,6 +37,7 @@
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ClusterServiceUtils;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.watcher.ResourceWatcherService;
 import org.elasticsearch.xpack.core.XPackSettings;
@@ -181,7 +182,7 @@ public void setup() throws Exception {
         final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool);
         tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService);

-        final TransportService transportService = new TransportService(Settings.EMPTY, null, null,
+        final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
             TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         final Realms realms = mock(Realms.class);
         action = new TransportSamlLogoutAction(settings, transportService, mock(ActionFilters.class), realms, tokenService);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
index 96b8b4fe25764..a8e2464805825 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.SecurityContext;
 import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest;
@@ -37,8 +38,8 @@ public class TransportAuthenticateActionTests extends ESTestCase {
     public void testInternalUser() {
         SecurityContext securityContext = mock(SecurityContext.class);
         when(securityContext.getUser()).thenReturn(randomFrom(SystemUser.INSTANCE, XPackUser.INSTANCE));
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, transportService,
             mock(ActionFilters.class), securityContext);
@@ -63,8 +64,8 @@ public void onFailure(Exception e) {

     public void testNullUser() {
         SecurityContext securityContext = mock(SecurityContext.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, transportService,
             mock(ActionFilters.class), securityContext);
@@ -91,8 +92,8 @@ public void testValidUser() {
         final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe"));
         SecurityContext securityContext = mock(SecurityContext.class);
         when(securityContext.getUser()).thenReturn(user);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportAuthenticateAction action = new TransportAuthenticateAction(Settings.EMPTY, transportService,
             mock(ActionFilters.class), securityContext);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
index a88478c50ec7d..516b33cbaccfa 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.SecuritySettingsSourceField;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.XPackSettings;
 import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest;
@@ -53,7 +54,7 @@ public void testAnonymousUser() {
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
         Settings passwordHashingSettings = Settings.builder().
             put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build();
-        TransportService transportService = new TransportService(passwordHashingSettings, null, null,
+        TransportService transportService = new TransportService(passwordHashingSettings, mock(Transport.class), null,
             TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportChangePasswordAction action = new TransportChangePasswordAction(settings, transportService,
             mock(ActionFilters.class), usersStore);
@@ -88,7 +89,7 @@ public void testInternalUsers() {
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
         Settings passwordHashingSettings = Settings.builder().
             put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build();
-        TransportService transportService = new TransportService(passwordHashingSettings, null, null,
+        TransportService transportService = new TransportService(passwordHashingSettings, mock(Transport.class), null,
             TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportChangePasswordAction action = new TransportChangePasswordAction(Settings.EMPTY, transportService,
             mock(ActionFilters.class), usersStore);
@@ -133,8 +134,8 @@ public void testValidUser() {
             listener.onResponse(null);
             return null;
         }).when(usersStore).changePassword(eq(request), any(ActionListener.class));
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         Settings passwordHashingSettings = Settings.builder().
             put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build();
         TransportChangePasswordAction action = new TransportChangePasswordAction(passwordHashingSettings, transportService,
@@ -168,8 +169,8 @@ public void testIncorrectPasswordHashingAlgorithm() {
         request.passwordHash(hasher.hash(SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING));
         final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
         final AtomicReference<ChangePasswordResponse> responseRef = new AtomicReference<>();
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         Settings passwordHashingSettings = Settings.builder().put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(),
             randomFrom("pbkdf2_50000", "pbkdf2_100000", "bcrypt11", "bcrypt8", "bcrypt")).build();
         TransportChangePasswordAction action = new TransportChangePasswordAction(passwordHashingSettings, transportService,
@@ -209,13 +210,12 @@ public Void answer(InvocationOnMock invocation) {
                 return null;
             }
         }).when(usersStore).changePassword(eq(request), any(ActionListener.class));
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         Settings passwordHashingSettings = Settings.builder().
             put(XPackSettings.PASSWORD_HASHING_ALGORITHM.getKey(), hashingAlgorithm).build();
         TransportChangePasswordAction action = new TransportChangePasswordAction(passwordHashingSettings, transportService,
             mock(ActionFilters.class), usersStore);
-        final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
         final AtomicReference<ChangePasswordResponse> responseRef = new AtomicReference<>();
         action.doExecute(mock(Task.class), request, new ActionListener<ChangePasswordResponse>() {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
index ed7f9cff6e25e..4e6e0b3551bc2 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest;
 import org.elasticsearch.xpack.core.security.action.user.DeleteUserResponse;
@@ -46,8 +47,8 @@ public class TransportDeleteUserActionTests extends ESTestCase {
     public void testAnonymousUser() {
         Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build();
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportDeleteUserAction action = new TransportDeleteUserAction(settings, mock(ActionFilters.class), usersStore,
             transportService);

         DeleteUserRequest request = new DeleteUserRequest(new AnonymousUser(settings).principal());
@@ -74,8 +75,8 @@ public void onFailure(Exception e) {

     public void testInternalUser() {
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService);
@@ -104,8 +105,8 @@ public void onFailure(Exception e) {
     public void testReservedUser() {
         final User reserved = randomFrom(new ElasticUser(true), new KibanaUser(true));
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService);
@@ -134,8 +135,8 @@ public void onFailure(Exception e) {
     public void testValidUser() {
         final User user = new User("joe");
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService);
@@ -175,8 +176,8 @@ public void testException() {
         final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException(), new RuntimeException());
         final User user = new User("joe");
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportDeleteUserAction action = new TransportDeleteUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService);
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
index b11a57c2d678a..1c5f93187c059 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.user.GetUsersRequest;
 import org.elasticsearch.xpack.core.security.action.user.GetUsersResponse;
@@ -90,8 +91,8 @@ public void testAnonymousUser() {
         AnonymousUser anonymousUser = new AnonymousUser(settings);
         ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser,
             securityIndex, threadPool);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, reservedRealm);
@@ -125,8 +126,8 @@ public void onFailure(Exception e) {

     public void testInternalUser() {
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, mock(ReservedRealm.class));
@@ -168,7 +169,7 @@ public void testReservedUsersOnly() {
         final int size = randomIntBetween(1, allReservedUsers.size());
         final List<User> reservedUsers = randomSubsetOf(size, allReservedUsers);
         final List<String> names = reservedUsers.stream().map(User::principal).collect(Collectors.toList());
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, reservedRealm);
@@ -208,7 +209,7 @@ public void testGetAllUsers() {
         ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap());
         ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings),
             securityIndex, threadPool);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, reservedRealm);
@@ -255,7 +256,7 @@ public void testGetStoreOnlyUsers() {
             randomFrom(Collections.singletonList(new User("joe")), Arrays.asList(new User("jane"), new User("fred")), randomUsers());
         final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY);
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, mock(ReservedRealm.class));
@@ -303,7 +304,7 @@ public void testException() {
             randomFrom(Collections.singletonList(new User("joe")), Arrays.asList(new User("jane"), new User("fred")), randomUsers());
         final String[] storeUsernames = storeUsers.stream().map(User::principal).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY);
         NativeUsersStore usersStore = mock(NativeUsersStore.class);
-        TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+        TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null, null, Collections.emptySet());
         TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ActionFilters.class), usersStore,
             transportService, mock(ReservedRealm.class));
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
index 9f4d7c957b46c..2c49c8d595e0c 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest;
 import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse;
@@ -58,9 +59,8 @@ public void setup() {
         user = new User(randomAlphaOfLengthBetween(4, 12));
         final ThreadPool threadPool = mock(ThreadPool.class);
         final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
-        final TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService
-            .NOOP_TRANSPORT_INTERCEPTOR,
-            x -> null, null, Collections.emptySet());
+        final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet());
         final Authentication authentication =
mock(Authentication.class); threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); @@ -309,4 +309,4 @@ private static MapBuilder mapBuilder() { return MapBuilder.newMapBuilder(); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java index 99e35f1b2145e..fff2479aa5d8b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; import org.elasticsearch.xpack.core.security.action.user.PutUserResponse; @@ -57,8 +58,8 @@ public void testAnonymousUser() { Settings settings = Settings.builder().put(AnonymousUser.ROLES_SETTING.getKey(), "superuser").build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportPutUserAction action = new TransportPutUserAction(settings, mock(ActionFilters.class), usersStore, transportService); PutUserRequest request = new PutUserRequest(); @@ -86,8 +87,8 @@ public void onFailure(Exception e) { public void testSystemUser() { NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore, transportService); PutUserRequest request = new PutUserRequest(); @@ -126,8 +127,8 @@ public void testReservedUser() { PlainActionFuture> userFuture = new PlainActionFuture<>(); reservedRealm.users(userFuture); final User reserved = randomFrom(userFuture.actionGet().toArray(new User[0])); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore, transportService); @@ -156,8 +157,8 @@ public void onFailure(Exception e) { public void testValidUser() { final User user = new User("joe"); 
NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore, transportService); @@ -203,8 +204,8 @@ public void testException() { final Exception e = randomFrom(new ElasticsearchSecurityException(""), new IllegalStateException(), new ValidationException()); final User user = new User("joe"); NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportPutUserAction action = new TransportPutUserAction(Settings.EMPTY, mock(ActionFilters.class), usersStore, transportService); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java index 1c2eb8a9a1503..d811b6359b186 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.user.SetEnabledRequest; import org.elasticsearch.xpack.core.security.action.user.SetEnabledResponse; @@ -62,8 +63,8 @@ public void testAnonymousUser() { threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); when(authentication.getUser()).thenReturn(user); NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportSetEnabledAction action = new TransportSetEnabledAction(settings, threadPool, transportService, mock(ActionFilters.class), usersStore); @@ -100,8 +101,8 @@ public void testInternalUser() { threadContext.putTransient(AuthenticationField.AUTHENTICATION_KEY, authentication); when(authentication.getUser()).thenReturn(user); NativeUsersStore usersStore = mock(NativeUsersStore.class); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), 
null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService, mock(ActionFilters.class), usersStore); @@ -154,8 +155,8 @@ public Void answer(InvocationOnMock invocation) { } }).when(usersStore) .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class)); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService, mock(ActionFilters.class), usersStore); @@ -206,8 +207,8 @@ public Void answer(InvocationOnMock invocation) { } }).when(usersStore) .setEnabled(eq(user.principal()), eq(request.enabled()), eq(request.getRefreshPolicy()), any(ActionListener.class)); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService, mock(ActionFilters.class), usersStore); @@ -246,8 +247,8 @@ public void testUserModifyingThemselves() { request.username(user.principal()); request.enabled(randomBoolean()); request.setRefreshPolicy(randomFrom(RefreshPolicy.values())); - TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, null, Collections.emptySet()); + TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportSetEnabledAction action = new TransportSetEnabledAction(Settings.EMPTY, threadPool, transportService, mock(ActionFilters.class), usersStore); From 18c17dfb35ba84ceb3fb78e166d73d9462ef8290 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 4 Jul 2018 10:48:00 +0100 Subject: [PATCH 34/36] S3 fixture should report 404 on unknown bucket (#31782) Today, `AmazonS3Fixture` returns 403 on attempts to access any inappropriate bucket, whether known or otherwise. In fact, S3 reports 404 on nonexistent buckets and 403 on inaccessible ones. This change enhances `AmazonS3Fixture` to distinguish these cases. 
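As a rough illustration of the behaviour being emulated (this sketch is not
part of the change below; the class and method names, and the use of a map
for known buckets, are invented here, while the status codes are real S3
semantics):

    import java.util.Map;

    // Picks the HTTP status an S3-like endpoint should return for a bucket
    // request: real S3 answers 404 (NoSuchBucket) for a nonexistent bucket
    // and 403 (AccessDenied) for one that exists but is not accessible.
    class BucketStatusSketch {

        static int statusFor(String bucket, String permittedBucket, Map<String, Object> knownBuckets) {
            if (permittedBucket.equals(bucket)) {
                return 200; // the one bucket this client may use: handle normally
            }
            return knownBuckets.containsKey(bucket)
                    ? 403   // known bucket, wrong credentials: AccessDenied
                    : 404;  // unknown bucket: NoSuchBucket
        }
    }

Getting the 404 case right is what lets the previously skipped "non existing
bucket" YAML tests below run again.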
---
 .../elasticsearch/repositories/s3/AmazonS3Fixture.java | 9 +++++++--
 .../20_repository_permanent_credentials.yml            | 4 ----
 .../30_repository_temporary_credentials.yml            | 4 ----
 3 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
index fcb208258aa03..e21f2bf71496b 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java
@@ -96,8 +96,13 @@ protected Response handle(final Request request) throws IOException {
         if (handler != null) {
             final String bucket = request.getParam("bucket");
             if (bucket != null && permittedBucket.equals(bucket) == false) {
-                // allow a null bucket to support bucket-free APIs
-                return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad bucket", "");
+                // allow a null bucket to support the multi-object-delete API which
+                // passes the bucket name in the host header instead of the URL.
+                if (buckets.containsKey(bucket)) {
+                    return newError(request.getId(), RestStatus.FORBIDDEN, "AccessDenied", "Bad bucket", "");
+                } else {
+                    return newBucketNotFoundError(request.getId(), bucket);
+                }
             }
             return handler.handle(request);
         } else {
diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml
index aa9d05e0579e3..39ce992b7a58e 100644
--- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml
+++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml
@@ -183,10 +183,6 @@ setup:
 ---
 "Register a repository with a non existing bucket":
 
-  - skip:
-      version: all
-      reason: to be fixed
-
   - do:
     catch: /repository_exception/
     snapshot.create_repository:
diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml
index 61ec7722903b6..497d85db752db 100644
--- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml
+++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml
@@ -183,10 +183,6 @@ setup:
 ---
 "Register a repository with a non existing bucket":
 
-  - skip:
-      version: all
-      reason: to be fixed
-
   - do:
     catch: /repository_exception/
     snapshot.create_repository:

From 5f87a84bef93dfd53b696cc6deac8edef22f5258 Mon Sep 17 00:00:00 2001
From: Christoph Büscher
Date: Wed, 4 Jul 2018 14:07:20 +0200
Subject: [PATCH 35/36] [Docs] Correct default window_size (#31582)

---
 docs/reference/search/request/rescore.asciidoc | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc
index 6e1bb2a9e6ce2..58d459a9456de 100644
--- a/docs/reference/search/request/rescore.asciidoc
+++ b/docs/reference/search/request/rescore.asciidoc
@@ -29,8 +29,7 @@
 The query rescorer executes a second query only on the Top-K results returned
 by the <> and <> phases. The number of
 docs which will be examined on each shard can be controlled by
-the `window_size` parameter, which defaults to
-<>.
+the `window_size` parameter, which defaults to 10.
 
 By default the scores from the original query and the rescore query are
 combined linearly to produce the final `_score` for each document. The

From 308e37f80ed474c86546d3b319e439b7da3c810a Mon Sep 17 00:00:00 2001
From: David Roberts
Date: Wed, 4 Jul 2018 13:56:32 +0100
Subject: [PATCH 36/36] [ML] Rate limit established model memory updates (#31768)

There is at most one model size stats document per bucket, but during
lookback a job can churn through many buckets very quickly. This can
lead to many cluster state updates if established model memory needs
to be updated for a given model size stats document.

This change rate limits established model memory updates to one per
job per 5 seconds. This is done by scheduling the updates 5 seconds in
the future, but replacing the value to be written if another model
size stats document is received during the waiting period.

Updating the values in arrears like this means that the last value
received will be the one associated with the job in the long term,
whereas alternative approaches such as not updating the value if a new
value was close to the old value would not.
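The scheduling pattern is easier to see stripped of the surrounding
machinery. The following is a minimal, self-contained sketch of the same
idea, not the actual implementation (which follows in the diff); every name
in it is invented for illustration:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;

    // Defer writes by a fixed delay; values arriving during the wait simply
    // replace the pending one, so there is at most one write per delay
    // period and the last value received always wins.
    class DelayedLastValueWriter {

        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        private final long delayMs;

        private volatile long latestValue;       // overwritten freely while a write is pending
        private ScheduledFuture<?> pendingWrite; // only accessed in synchronized methods

        DelayedLastValueWriter(long delayMs) {
            this.delayMs = delayMs;
        }

        // Called for every new value; schedules a write only if none is pending.
        synchronized void onNewValue(long value) {
            latestValue = value;
            if (pendingWrite == null) {
                pendingWrite = scheduler.schedule(this::flush, delayMs, TimeUnit.MILLISECONDS);
            }
        }

        // Runs from the scheduled task; calling it directly (e.g. on shutdown)
        // cancels the pending task and writes immediately.
        synchronized void flush() {
            if (pendingWrite != null) {
                pendingWrite.cancel(false); // a no-op when we are the running task
                pendingWrite = null;
                persist(latestValue);       // persist whatever value is current now
            }
        }

        void persist(long value) {
            System.out.println("persisting " + value); // stand-in for the cluster state update
        }
    }

Writing in arrears this way means the stored value always converges to the
last one received, which a "skip the update if the new value is close to the
old one" policy could not guarantee.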
---
 .../autodetect/AutodetectCommunicator.java |   3 +-
 .../output/AutoDetectResultProcessor.java  | 131 ++++++++++++++----
 .../AutoDetectResultProcessorTests.java    |  91 ++++++++++--
 3 files changed, 188 insertions(+), 37 deletions(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
index 09a7f3c11040d..bdac41cd9b96d 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.env.Environment;
@@ -176,7 +177,7 @@ public void close(boolean restart, String reason) {
                 // In this case the original exception is spurious and highly misleading
                 throw ExceptionsHelper.conflictStatusException("Close job interrupted by kill request");
             } else {
-                throw new ElasticsearchException(e);
+                throw FutureUtils.rethrowExecutionException(e);
             }
         } finally {
             destroyCategorizationAnalyzer();
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java
index 67eccb1caef41..da5e70112f045 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessor.java
@@ -14,6 +14,9 @@
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.xpack.core.ml.MachineLearningField;
 import org.elasticsearch.xpack.core.ml.action.PutJobAction;
 import org.elasticsearch.xpack.core.ml.action.UpdateJobAction;
@@ -30,6 +33,7 @@
 import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats;
 import org.elasticsearch.xpack.core.ml.job.results.Influencer;
 import org.elasticsearch.xpack.core.ml.job.results.ModelPlot;
+import org.elasticsearch.xpack.ml.MachineLearning;
 import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
 import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister;
 import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess;
@@ -43,6 +47,7 @@
 import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Future;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
@@ -71,6 +76,13 @@ public class AutoDetectResultProcessor {
 
     private static final Logger LOGGER = Loggers.getLogger(AutoDetectResultProcessor.class);
 
+    /**
+     * This is how far behind real-time we'll update the job with the latest established model memory.
+     * If more updates are received during the delay period then they'll take precedence.
+     * As a result there will be at most one update of established model memory per delay period.
+     */
+    private static final TimeValue ESTABLISHED_MODEL_MEMORY_UPDATE_DELAY = TimeValue.timeValueSeconds(5);
+
     private final Client client;
     private final Auditor auditor;
     private final String jobId;
@@ -90,8 +102,10 @@ public class AutoDetectResultProcessor {
      * New model size stats are read as the process is running
      */
     private volatile ModelSizeStats latestModelSizeStats;
+    private volatile Date latestDateForEstablishedModelMemoryCalc;
     private volatile long latestEstablishedModelMemory;
     private volatile boolean haveNewLatestModelSizeStats;
+    private Future scheduledEstablishedModelMemoryUpdate; // only accessed in synchronized methods
 
     public AutoDetectResultProcessor(Client client, Auditor auditor, String jobId, Renormalizer renormalizer,
                                      JobResultsPersister persister, JobProvider jobProvider, ModelSizeStats latestModelSizeStats,
                                      boolean restoredSnapshot) {
@@ -148,6 +162,7 @@ public void process(AutodetectProcess process) {
             }
 
             LOGGER.info("[{}] {} buckets parsed from autodetect output", jobId, bucketCount);
+            runEstablishedModelMemoryUpdate(true);
         } catch (Exception e) {
             failed = true;
@@ -194,15 +209,15 @@ void processResult(Context context, AutodetectResult result) {
             // persist after deleting interim results in case the new
             // results are also interim
             context.bulkResultsPersister.persistBucket(bucket).executeRequest();
+            latestDateForEstablishedModelMemoryCalc = bucket.getTimestamp();
             ++bucketCount;
 
             // if we haven't previously set established model memory, consider trying again after
-            // a reasonable amount of time has elapsed since the last model size stats update
+            // a reasonable number of buckets have elapsed since the last model size stats update
             long minEstablishedTimespanMs = JobProvider.BUCKETS_FOR_ESTABLISHED_MEMORY_SIZE * bucket.getBucketSpan() * 1000L;
-            if (haveNewLatestModelSizeStats && latestEstablishedModelMemory == 0
-                    && bucket.getTimestamp().getTime() > latestModelSizeStats.getTimestamp().getTime() + minEstablishedTimespanMs) {
-                persister.commitResultWrites(context.jobId);
-                updateEstablishedModelMemoryOnJob(bucket.getTimestamp(), latestModelSizeStats);
+            if (haveNewLatestModelSizeStats && latestEstablishedModelMemory == 0 && latestDateForEstablishedModelMemoryCalc.getTime()
+                    > latestModelSizeStats.getTimestamp().getTime() + minEstablishedTimespanMs) {
+                scheduleEstablishedModelMemoryUpdate(ESTABLISHED_MODEL_MEMORY_UPDATE_DELAY);
                 haveNewLatestModelSizeStats = false;
             }
         }
@@ -293,15 +308,14 @@ private void processModelSizeStats(Context context, ModelSizeStats modelSizeStats) {
         persister.persistModelSizeStats(modelSizeStats);
         notifyModelMemoryStatusChange(context, modelSizeStats);
         latestModelSizeStats = modelSizeStats;
+        latestDateForEstablishedModelMemoryCalc = modelSizeStats.getTimestamp();
         haveNewLatestModelSizeStats = true;
 
         // This is a crude way to NOT refresh the index and NOT attempt to update established model memory during the first 20 buckets
         // because this is when the model size stats are likely to be least stable and lots of updates will be coming through, and
        // we'll NEVER consider memory usage to be established during this period
         if (restoredSnapshot || bucketCount >= JobProvider.BUCKETS_FOR_ESTABLISHED_MEMORY_SIZE) {
-            // We need to make all results written up to and including these stats available for the established memory calculation
-            persister.commitResultWrites(context.jobId);
-            updateEstablishedModelMemoryOnJob(modelSizeStats.getTimestamp(), modelSizeStats);
+            scheduleEstablishedModelMemoryUpdate(ESTABLISHED_MODEL_MEMORY_UPDATE_DELAY);
         }
     }
@@ -351,26 +365,91 @@ public void onFailure(Exception e) {
         });
     }
 
-    private void updateEstablishedModelMemoryOnJob(Date latestBucketTimestamp, ModelSizeStats modelSizeStats) {
-        jobProvider.getEstablishedMemoryUsage(jobId, latestBucketTimestamp, modelSizeStats, establishedModelMemory -> {
-            JobUpdate update = new JobUpdate.Builder(jobId)
-                .setEstablishedModelMemory(establishedModelMemory).build();
-            UpdateJobAction.Request updateRequest = UpdateJobAction.Request.internal(jobId, update);
-            updateRequest.setWaitForAck(false);
-
-            executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, new ActionListener() {
-                @Override
-                public void onResponse(PutJobAction.Response response) {
-                    latestEstablishedModelMemory = establishedModelMemory;
-                    LOGGER.debug("[{}] Updated job with established model memory [{}]", jobId, establishedModelMemory);
-                }
+    /**
+     * The purpose of this method is to avoid saturating the cluster state update thread
+     * when a lookback job is churning through buckets very fast and the memory usage of
+     * the job is changing regularly. The idea is to only update the established model
+     * memory associated with the job a few seconds after the new value has been received.
+     * If more updates are received during the delay period then they simply replace the
+     * value that originally caused the update to be scheduled. This rate limits cluster
+     * state updates due to established model memory changing to one per job per delay period.
+     * (In reality updates will only occur this rapidly during lookback. During real-time
+     * operation the limit of one model size stats document per bucket will mean there is a
+     * maximum of one cluster state update per job per bucket, and usually the bucket span
+     * is 5 minutes or more.)
+     * @param delay The delay before updating established model memory.
+     */
+    synchronized void scheduleEstablishedModelMemoryUpdate(TimeValue delay) {
 
-                @Override
-                public void onFailure(Exception e) {
-                    LOGGER.error("[" + jobId + "] Failed to update job with new established model memory [" + establishedModelMemory + "]",
-                        e);
+        if (scheduledEstablishedModelMemoryUpdate == null) {
+            try {
+                scheduledEstablishedModelMemoryUpdate = client.threadPool().schedule(delay, MachineLearning.UTILITY_THREAD_POOL_NAME,
+                    () -> runEstablishedModelMemoryUpdate(false));
+                LOGGER.trace("[{}] Scheduled established model memory update to run in [{}]", jobId, delay);
+            } catch (EsRejectedExecutionException e) {
+                if (e.isExecutorShutdown()) {
+                    LOGGER.debug("failed to schedule established model memory update; shutting down", e);
+                } else {
+                    throw e;
                 }
-            });
+            }
+        }
+    }
+
+    /**
+     * This method is called from two places:
+     * - From the {@link Future} used for delayed updates
+     * - When shutting down this result processor
+     * When shutting down the result processor it's only necessary to do anything
+     * if an update has been scheduled, but we want to do the update immediately.
+     * Despite cancelling the scheduled update in this case, it's possible that
+     * it's already started running, in which case this method will get called
+     * twice in quick succession. But the second call will do nothing, as
+     * scheduledEstablishedModelMemoryUpdate will have been reset
+     * to null by the first call.
+     */
+    private synchronized void runEstablishedModelMemoryUpdate(boolean cancelExisting) {
+
+        if (scheduledEstablishedModelMemoryUpdate != null) {
+            if (cancelExisting) {
+                LOGGER.debug("[{}] Bringing forward previously scheduled established model memory update", jobId);
+                FutureUtils.cancel(scheduledEstablishedModelMemoryUpdate);
+            }
+            scheduledEstablishedModelMemoryUpdate = null;
+            updateEstablishedModelMemoryOnJob();
+        }
+    }
+
+    private void updateEstablishedModelMemoryOnJob() {
+
+        // Copy these before committing writes, so the calculation is done based on committed documents
+        Date latestBucketTimestamp = latestDateForEstablishedModelMemoryCalc;
+        ModelSizeStats modelSizeStatsForCalc = latestModelSizeStats;
+
+        // We need to make all results written up to and including these stats available for the established memory calculation
+        persister.commitResultWrites(jobId);
+
+        jobProvider.getEstablishedMemoryUsage(jobId, latestBucketTimestamp, modelSizeStatsForCalc, establishedModelMemory -> {
+            if (latestEstablishedModelMemory != establishedModelMemory) {
+                JobUpdate update = new JobUpdate.Builder(jobId).setEstablishedModelMemory(establishedModelMemory).build();
+                UpdateJobAction.Request updateRequest = UpdateJobAction.Request.internal(jobId, update);
+                updateRequest.setWaitForAck(false);
+
+                executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest,
+                    new ActionListener() {
+                        @Override
+                        public void onResponse(PutJobAction.Response response) {
+                            latestEstablishedModelMemory = establishedModelMemory;
+                            LOGGER.debug("[{}] Updated job with established model memory [{}]", jobId, establishedModelMemory);
+                        }
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            LOGGER.error("[" + jobId + "] Failed to update job with new established model memory [" +
+                                establishedModelMemory + "]", e);
+                        }
+                    });
+            }
         }, e -> LOGGER.error("[" + jobId + "] Failed to calculate established model memory", e));
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java
index 1221f85e61de8..8eb0317ba0dbe 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutoDetectResultProcessorTests.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -34,6 +35,7 @@
 import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer;
 import org.elasticsearch.xpack.ml.job.results.AutodetectResult;
 import org.elasticsearch.xpack.ml.notifications.Auditor;
+import org.junit.After;
 import org.junit.Before;
 import org.mockito.InOrder;
 
@@ -43,14 +45,16 @@
 import java.util.Date;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.function.Consumer;
 
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.eq;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Matchers.same;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.inOrder;
@@ -64,7 +68,9 @@ public class AutoDetectResultProcessorTests extends ESTestCase {
 
     private static final String JOB_ID = "_id";
+    private static final long BUCKET_SPAN_MS = 1000;
 
+    private ThreadPool threadPool;
     private Client client;
     private Auditor auditor;
     private Renormalizer renormalizer;
@@ -72,12 +78,14 @@ public class AutoDetectResultProcessorTests extends ESTestCase {
     private JobProvider jobProvider;
     private FlushListener flushListener;
     private AutoDetectResultProcessor processorUnderTest;
+    private ScheduledThreadPoolExecutor executor;
 
     @Before
     public void setUpMocks() {
+        executor = new ScheduledThreadPoolExecutor(1);
         client = mock(Client.class);
         auditor = mock(Auditor.class);
-        ThreadPool threadPool = mock(ThreadPool.class);
+        threadPool = mock(ThreadPool.class);
         when(client.threadPool()).thenReturn(threadPool);
         when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
         renormalizer = mock(Renormalizer.class);
@@ -85,7 +93,12 @@ public void setUpMocks() {
         jobProvider = mock(JobProvider.class);
         flushListener = mock(FlushListener.class);
         processorUnderTest = new AutoDetectResultProcessor(client, auditor, JOB_ID, renormalizer, persister, jobProvider,
-            new ModelSizeStats.Builder(JOB_ID).build(), false, flushListener);
+            new ModelSizeStats.Builder(JOB_ID).setTimestamp(new Date(BUCKET_SPAN_MS)).build(), false, flushListener);
+    }
+
+    @After
+    public void cleanup() {
+        executor.shutdown();
     }
 
     public void testProcess() throws TimeoutException {
@@ -289,6 +302,8 @@ public void testProcessResult_modelSizeStats() {
 
     public void testProcessResult_modelSizeStatsWithMemoryStatusChanges() {
         JobResultsPersister.Builder bulkBuilder = mock(JobResultsPersister.Builder.class);
 
+        setupScheduleDelayTime(TimeValue.timeValueSeconds(5));
+
         AutoDetectResultProcessor.Context context = new AutoDetectResultProcessor.Context(JOB_ID, bulkBuilder);
         context.deleteInterimRequired = false;
         AutodetectResult result = mock(AutodetectResult.class);
@@ -322,11 +337,14 @@ public void testProcessResult_modelSizeStatsWithMemoryStatusChanges() {
         verifyNoMoreInteractions(auditor);
     }
 
-    public void testProcessResult_modelSizeStatsAfterManyBuckets() {
+    public void testProcessResult_modelSizeStatsAfterManyBuckets() throws Exception {
         JobResultsPersister.Builder bulkBuilder = mock(JobResultsPersister.Builder.class);
         when(persister.bulkPersisterBuilder(JOB_ID)).thenReturn(bulkBuilder);
         when(bulkBuilder.persistBucket(any(Bucket.class))).thenReturn(bulkBuilder);
 
+        // To avoid slowing down the test this is using a delay of 1 nanosecond rather than the 5 seconds used in production
+        setupScheduleDelayTime(TimeValue.timeValueNanos(1));
+
         AutoDetectResultProcessor.Context context = new AutoDetectResultProcessor.Context(JOB_ID, bulkBuilder);
         context.deleteInterimRequired = false;
         for (int i = 0; i < JobProvider.BUCKETS_FOR_ESTABLISHED_MEMORY_SIZE; ++i) {
@@ -338,16 +356,64 @@ public void testProcessResult_modelSizeStatsAfterManyBuckets() {
 
         AutodetectResult result = mock(AutodetectResult.class);
         ModelSizeStats modelSizeStats = mock(ModelSizeStats.class);
+        Date timestamp = new Date(BUCKET_SPAN_MS);
+        when(modelSizeStats.getTimestamp()).thenReturn(timestamp);
         when(result.getModelSizeStats()).thenReturn(modelSizeStats);
 
         processorUnderTest.processResult(context, result);
 
-        verify(persister, times(1)).persistModelSizeStats(modelSizeStats);
-        verify(persister, times(1)).commitResultWrites(JOB_ID);
-        verifyNoMoreInteractions(persister);
-        verify(jobProvider, times(1)).getEstablishedMemoryUsage(eq(JOB_ID), isNull(Date.class), eq(modelSizeStats),
+        // Some calls will be made 1 nanosecond later in a different thread, hence the assertBusy()
+        assertBusy(() -> {
+            verify(persister, times(1)).persistModelSizeStats(modelSizeStats);
+            verify(persister, times(1)).commitResultWrites(JOB_ID);
+            verifyNoMoreInteractions(persister);
+            verify(jobProvider, times(1)).getEstablishedMemoryUsage(eq(JOB_ID), eq(timestamp), eq(modelSizeStats), any(Consumer.class),
+                any(Consumer.class));
+            verifyNoMoreInteractions(jobProvider);
+            assertEquals(modelSizeStats, processorUnderTest.modelSizeStats());
+        });
+    }
+
+    public void testProcessResult_manyModelSizeStatsInQuickSuccession() throws Exception {
+        JobResultsPersister.Builder bulkBuilder = mock(JobResultsPersister.Builder.class);
+        when(persister.bulkPersisterBuilder(JOB_ID)).thenReturn(bulkBuilder);
+        when(bulkBuilder.persistBucket(any(Bucket.class))).thenReturn(bulkBuilder);
+
+        setupScheduleDelayTime(TimeValue.timeValueSeconds(1));
+
+        AutoDetectResultProcessor.Context context = new AutoDetectResultProcessor.Context(JOB_ID, bulkBuilder);
+        context.deleteInterimRequired = false;
+        ModelSizeStats modelSizeStats = null;
+        for (int i = 1; i <= JobProvider.BUCKETS_FOR_ESTABLISHED_MEMORY_SIZE + 5; ++i) {
+            AutodetectResult result = mock(AutodetectResult.class);
+            Bucket bucket = mock(Bucket.class);
+            when(bucket.getTimestamp()).thenReturn(new Date(BUCKET_SPAN_MS * i));
+            when(result.getBucket()).thenReturn(bucket);
+            processorUnderTest.processResult(context, result);
+            if (i > JobProvider.BUCKETS_FOR_ESTABLISHED_MEMORY_SIZE) {
+                result = mock(AutodetectResult.class);
+                modelSizeStats = mock(ModelSizeStats.class);
+                when(modelSizeStats.getTimestamp()).thenReturn(new Date(BUCKET_SPAN_MS * i));
+                when(result.getModelSizeStats()).thenReturn(modelSizeStats);
+                processorUnderTest.processResult(context, result);
+            }
+        }
+
+        ModelSizeStats lastModelSizeStats = modelSizeStats;
+        assertNotNull(lastModelSizeStats);
+        Date lastTimestamp = lastModelSizeStats.getTimestamp();
+
+        // Some calls will be made 1 second later in a different thread, hence the assertBusy()
+        assertBusy(() -> {
+            // All the model size stats should be persisted to the index...
+            verify(persister, times(5)).persistModelSizeStats(any(ModelSizeStats.class));
+            // ...but only the last should trigger an established model memory update
+            verify(persister, times(1)).commitResultWrites(JOB_ID);
+            verifyNoMoreInteractions(persister);
+            verify(jobProvider, times(1)).getEstablishedMemoryUsage(eq(JOB_ID), eq(lastTimestamp), eq(lastModelSizeStats),
                 any(Consumer.class), any(Consumer.class));
-        verifyNoMoreInteractions(jobProvider);
-        assertEquals(modelSizeStats, processorUnderTest.modelSizeStats());
+            verifyNoMoreInteractions(jobProvider);
+            assertEquals(lastModelSizeStats, processorUnderTest.modelSizeStats());
+        });
     }
 
     public void testProcessResult_modelSnapshot() {
@@ -487,4 +553,9 @@ public void testKill() throws TimeoutException {
         verify(renormalizer, times(1)).waitUntilIdle();
         verify(flushListener, times(1)).clear();
     }
+
+    private void setupScheduleDelayTime(TimeValue delay) {
+        when(threadPool.schedule(any(TimeValue.class), anyString(), any(Runnable.class)))
+            .thenAnswer(i -> executor.schedule((Runnable) i.getArguments()[2], delay.nanos(), TimeUnit.NANOSECONDS));
+    }
 }