From 08789d398e400587a62218b3b726bc12ac07d617 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 5 Oct 2023 09:59:33 -0400 Subject: [PATCH 001/176] ESQL: Move cast in Block reading (#100334) This moves the cast required for reading `Block`s from the special subclass a little earlier. --- .../org/elasticsearch/compute/data/BooleanBlock.java | 8 ++++++-- .../org/elasticsearch/compute/data/BytesRefBlock.java | 8 ++++++-- .../org/elasticsearch/compute/data/DoubleBlock.java | 8 ++++++-- .../org/elasticsearch/compute/data/IntBlock.java | 8 ++++++-- .../org/elasticsearch/compute/data/LongBlock.java | 8 ++++++-- .../java/org/elasticsearch/compute/data/X-Block.java.st | 8 ++++++-- 6 files changed, 36 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index 80f396695fc2f..5b58e7bcf5c30 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -44,12 +44,16 @@ default String getWriteableName() { NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Block.class, "BooleanBlock", BooleanBlock::readFrom); private static BooleanBlock readFrom(StreamInput in) throws IOException { + return readFrom((BlockStreamInput) in); + } + + private static BooleanBlock readFrom(BlockStreamInput in) throws IOException { final boolean isVector = in.readBoolean(); if (isVector) { - return BooleanVector.readFrom(((BlockStreamInput) in).blockFactory(), in).asBlock(); + return BooleanVector.readFrom(in.blockFactory(), in).asBlock(); } final int positions = in.readVInt(); - try (BooleanBlock.Builder builder = ((BlockStreamInput) in).blockFactory().newBooleanBlockBuilder(positions)) { + try (BooleanBlock.Builder builder = in.blockFactory().newBooleanBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { if (in.readBoolean()) { builder.appendNull(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 9409212a9c998..9c48ac61d5a1b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -48,12 +48,16 @@ default String getWriteableName() { NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Block.class, "BytesRefBlock", BytesRefBlock::readFrom); private static BytesRefBlock readFrom(StreamInput in) throws IOException { + return readFrom((BlockStreamInput) in); + } + + private static BytesRefBlock readFrom(BlockStreamInput in) throws IOException { final boolean isVector = in.readBoolean(); if (isVector) { - return BytesRefVector.readFrom(((BlockStreamInput) in).blockFactory(), in).asBlock(); + return BytesRefVector.readFrom(in.blockFactory(), in).asBlock(); } final int positions = in.readVInt(); - try (BytesRefBlock.Builder builder = ((BlockStreamInput) in).blockFactory().newBytesRefBlockBuilder(positions)) { + try (BytesRefBlock.Builder builder = in.blockFactory().newBytesRefBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { if (in.readBoolean()) { builder.appendNull(); diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 806ee6d3680bc..a3dba750556ab 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -44,12 +44,16 @@ default String getWriteableName() { NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Block.class, "DoubleBlock", DoubleBlock::readFrom); private static DoubleBlock readFrom(StreamInput in) throws IOException { + return readFrom((BlockStreamInput) in); + } + + private static DoubleBlock readFrom(BlockStreamInput in) throws IOException { final boolean isVector = in.readBoolean(); if (isVector) { - return DoubleVector.readFrom(((BlockStreamInput) in).blockFactory(), in).asBlock(); + return DoubleVector.readFrom(in.blockFactory(), in).asBlock(); } final int positions = in.readVInt(); - try (DoubleBlock.Builder builder = ((BlockStreamInput) in).blockFactory().newDoubleBlockBuilder(positions)) { + try (DoubleBlock.Builder builder = in.blockFactory().newDoubleBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { if (in.readBoolean()) { builder.appendNull(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 580da5e5a7415..d343428aab2bc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -44,12 +44,16 @@ default String getWriteableName() { NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Block.class, "IntBlock", IntBlock::readFrom); private static IntBlock readFrom(StreamInput in) throws IOException { + return readFrom((BlockStreamInput) in); + } + + private static IntBlock readFrom(BlockStreamInput in) throws IOException { final boolean isVector = in.readBoolean(); if (isVector) { - return IntVector.readFrom(((BlockStreamInput) in).blockFactory(), in).asBlock(); + return IntVector.readFrom(in.blockFactory(), in).asBlock(); } final int positions = in.readVInt(); - try (IntBlock.Builder builder = ((BlockStreamInput) in).blockFactory().newIntBlockBuilder(positions)) { + try (IntBlock.Builder builder = in.blockFactory().newIntBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { if (in.readBoolean()) { builder.appendNull(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 2db757efd7091..9ff3a5ba116a4 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -44,12 +44,16 @@ default String getWriteableName() { NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Block.class, "LongBlock", LongBlock::readFrom); private static LongBlock readFrom(StreamInput in) throws IOException { + return readFrom((BlockStreamInput) in); + } + + private static LongBlock readFrom(BlockStreamInput in) throws IOException { final boolean isVector = 
in.readBoolean(); if (isVector) { - return LongVector.readFrom(((BlockStreamInput) in).blockFactory(), in).asBlock(); + return LongVector.readFrom(in.blockFactory(), in).asBlock(); } final int positions = in.readVInt(); - try (LongBlock.Builder builder = ((BlockStreamInput) in).blockFactory().newLongBlockBuilder(positions)) { + try (LongBlock.Builder builder = in.blockFactory().newLongBlockBuilder(positions)) { for (int i = 0; i < positions; i++) { if (in.readBoolean()) { builder.appendNull(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 81a0d3de7f8f7..e8ccc83b51351 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -60,12 +60,16 @@ $endif$ NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Block.class, "$Type$Block", $Type$Block::readFrom); private static $Type$Block readFrom(StreamInput in) throws IOException { + return readFrom((BlockStreamInput) in); + } + + private static $Type$Block readFrom(BlockStreamInput in) throws IOException { final boolean isVector = in.readBoolean(); if (isVector) { - return $Type$Vector.readFrom(((BlockStreamInput) in).blockFactory(), in).asBlock(); + return $Type$Vector.readFrom(in.blockFactory(), in).asBlock(); } final int positions = in.readVInt(); - try ($Type$Block.Builder builder = ((BlockStreamInput) in).blockFactory().new$Type$BlockBuilder(positions)) { + try ($Type$Block.Builder builder = in.blockFactory().new$Type$BlockBuilder(positions)) { for (int i = 0; i < positions; i++) { if (in.readBoolean()) { builder.appendNull(); From 06f838d2f8d0eb92b11214ff600524f5fb6d8227 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 5 Oct 2023 08:07:52 -0600 Subject: [PATCH 002/176] Add more debugging for file settings edge case (#100313) In order to continue debugging #98391, this commit adds more debug logging to the test, to determine if the error metadata is not being placed in the cluster state correctly. 
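
For context, the listener pattern this test builds on looks roughly like the following sketch (a minimal sketch, not the exact test code; the ReservedStateMetadata accessors and the FileSettingsService.NAMESPACE lookup are assumptions drawn from the surrounding test class):

    // Wait for the cluster state update whose reserved (file-settings) metadata
    // carries error metadata, then capture the metadata version and release the latch.
    final CountDownLatch savedClusterState = new CountDownLatch(1);
    final AtomicLong metadataVersion = new AtomicLong(-1);
    clusterService.addListener(new ClusterStateListener() {
        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            // assumed accessor: reserved state for the "file settings" namespace
            ReservedStateMetadata reservedState =
                event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE);
            if (reservedState != null && reservedState.errorMetadata() != null) {
                clusterService.removeListener(this); // stop listening once the error shows up
                metadataVersion.set(event.state().metadata().version());
                savedClusterState.countDown();
            } else if (reservedState != null) {
                // the branch this commit adds: reserved state arrived without error metadata
                logger.debug(() -> "Got reserved state update without error metadata: " + reservedState);
            }
        }
    });

The added else branches mean the test log now records which of these cases actually happened, instead of silently dropping non-matching cluster state updates.
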
--- .../security/FileSettingsRoleMappingsStartupIT.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java index 7b02495c7227b..48e97b7afb897 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java @@ -105,6 +105,10 @@ public void clusterChanged(ClusterChangedEvent event) { clusterService.removeListener(this); metadataVersion.set(event.state().metadata().version()); savedClusterState.countDown(); + } else if (reservedState != null) { + logger.debug(() -> "Got reserved state update without error metadata: " + reservedState); + } else { + logger.debug(() -> "Got cluster state update: " + event.source()); } } }); @@ -112,7 +116,10 @@ public void clusterChanged(ClusterChangedEvent event) { return new Tuple<>(savedClusterState, metadataVersion); } - @TestLogging(value = "org.elasticsearch.common.file:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/98391") + @TestLogging( + value = "org.elasticsearch.common.file:DEBUG,org.elasticsearch.xpack.security:DEBUG,org.elasticsearch.cluster.metadata:DEBUG", + reason = "https://github.com/elastic/elasticsearch/issues/98391" + ) public void testFailsOnStartMasterNodeWithError() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); From 7c64459482a5899ec8c966d72a73fa3bc79825e0 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 5 Oct 2023 10:28:10 -0400 Subject: [PATCH 003/176] ESQL: Reenable some tests (#100339) We have fixed them! 
--- .../testFixtures/src/main/resources/keep.csv-spec | 3 +-- .../testFixtures/src/main/resources/row.csv-spec | 15 +++++---------- .../testFixtures/src/main/resources/show.csv-spec | 3 +-- .../src/main/resources/stats.csv-spec | 3 +-- .../src/main/resources/version.csv-spec | 3 +-- 5 files changed, 9 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec index 8e5f3459ca95c..13a8b8f66fc4f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec @@ -205,8 +205,7 @@ emp_no:integer | languages:integer | gender:keyword | first_name:keyword | abc:i 10100 | 4 | F | Hironobu | 3 ; -# awaitsfix https://github.com/elastic/elasticsearch/issues/99826 -projectFromWithStatsAfterLimit-Ignore +projectFromWithStatsAfterLimit from employees | sort emp_no | keep gender, avg_worked_seconds, first_name, last_name | limit 10 | stats m = max(avg_worked_seconds) by gender; m:long | gender:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec index 50f0d5b72dd41..1b424dd876caa 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/row.csv-spec @@ -170,24 +170,21 @@ a:integer | b:integer | y:integer 1 | 2 | null ; -// AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -rowWithNullsInCount-Ignore +rowWithNullsInCount row a = 1.5, b = 2.6, c = null | eval s = null + a + b | stats c = count(s); c:long 0 ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -rowWithNullsInAvg-Ignore +rowWithNullsInAvg row a = 1.5, b = 2.6, c = null | eval s = null + a + b | stats c = avg(s); c:double null ; -// AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -rowWithNullsInAvg2-Ignore +rowWithNullsInAvg2 row a = 1.5, b = 2.6, c = null | eval s = a - b * c | stats avg(s); avg(s):double @@ -228,16 +225,14 @@ row a = 1 | limit 0; a:integer ; -// AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -rowWithMultipleStats-Ignore +rowWithMultipleStats row a = 1+3, b = 2, ab = 5 | eval x = 1 + b + 5 | stats avg = avg(x), min(x), max(x), count(x), avg(x), avg(ab), avg(a); avg:double | min(x):integer | max(x):integer | count(x):long | avg(x):double | avg(ab):double | avg(a):double 8.0 | 8 | 8 | 1 | 8.0 | 5.0 | 4.0 ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -rowWithMultipleStatsOverNull-Ignore +rowWithMultipleStatsOverNull row x=1, y=2 | eval tot = null + y + x | stats c=count(tot), a=avg(tot), mi=min(tot), ma=max(tot), s=sum(tot); c:long | a:double | mi:integer | ma:integer | s:long diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 2bb6a2be9832f..60c5fc94ba0d6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -1,5 +1,4 @@ -# AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -showInfo-Ignore +showInfo show info | stats v = count(version); v:long diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 47a0bd92a56ba..d671ba6ec13b1 
100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -525,8 +525,7 @@ min(salary):i | max(salary):i | c:l 25324 | 74999 | 100 ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -statsWithLiterals-Ignore +statsWithLiterals from employees | limit 10 | eval x = 1 | stats c = count(x); c:l diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec index 08196b2d7726d..df1fa6e67f279 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec @@ -203,8 +203,7 @@ bad 5.2.9-SNAPSHOT ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -groupByVersionCast-Ignore +groupByVersionCast FROM apps | EVAL g = TO_VER(CONCAT("1.", TO_STR(version))) | STATS id = MAX(id) BY g | SORT id | DROP g; id:i From 055df9a104a646cbb12db0b4d11ccaf6ecba6ffa Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Thu, 5 Oct 2023 16:28:48 +0200 Subject: [PATCH 004/176] ESQL: eval - enable block tracking for boolean logic (#100325) --- .../xpack/esql/evaluator/EvalMapper.java | 50 ++++++++++--------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index 69dd9c1a50202..53a915046b45f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -105,37 +105,39 @@ public Block.Ref eval(Page page) { */ private Block eval(Block lhs, Block rhs) { int positionCount = lhs.getPositionCount(); - BooleanBlock.Builder result = BooleanBlock.newBlockBuilder(positionCount); - for (int p = 0; p < positionCount; p++) { - if (lhs.getValueCount(p) > 1) { - result.appendNull(); - continue; + try (BooleanBlock.Builder result = BooleanBlock.newBlockBuilder(positionCount, lhs.blockFactory())) { + for (int p = 0; p < positionCount; p++) { + if (lhs.getValueCount(p) > 1) { + result.appendNull(); + continue; + } + if (rhs.getValueCount(p) > 1) { + result.appendNull(); + continue; + } + Boolean v = bl.function() + .apply( + lhs.isNull(p) ? null : ((BooleanBlock) lhs).getBoolean(lhs.getFirstValueIndex(p)), + rhs.isNull(p) ? null : ((BooleanBlock) rhs).getBoolean(rhs.getFirstValueIndex(p)) + ); + if (v == null) { + result.appendNull(); + continue; + } + result.appendBoolean(v); } - if (rhs.getValueCount(p) > 1) { - result.appendNull(); - continue; - } - Boolean v = bl.function() - .apply( - lhs.isNull(p) ? null : ((BooleanBlock) lhs).getBoolean(lhs.getFirstValueIndex(p)), - rhs.isNull(p) ? 
null : ((BooleanBlock) rhs).getBoolean(rhs.getFirstValueIndex(p)) - ); - if (v == null) { - result.appendNull(); - continue; - } - result.appendBoolean(v); + return result.build(); } - return result.build(); } private Block eval(BooleanVector lhs, BooleanVector rhs) { int positionCount = lhs.getPositionCount(); - BooleanVector.Builder result = BooleanVector.newVectorBuilder(positionCount); - for (int p = 0; p < positionCount; p++) { - result.appendBoolean(bl.function().apply(lhs.getBoolean(p), rhs.getBoolean(p))); + try (var result = BooleanVector.newVectorFixedBuilder(positionCount, lhs.blockFactory())) { + for (int p = 0; p < positionCount; p++) { + result.appendBoolean(bl.function().apply(lhs.getBoolean(p), rhs.getBoolean(p))); + } + return result.build().asBlock(); } - return result.build().asBlock(); } @Override From e1fa3ef50ba413806adb27e414484cbf653e2a8b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 5 Oct 2023 10:53:43 -0400 Subject: [PATCH 005/176] ESQL: Reenable another test (#100342) We'd disabled this one a week ago because `EVAL | STATS` wasn't properly releasing blocks. It does now, so we can have this test back. --- .../elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java index 206e6b48db2f7..23638ef9384cc 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java @@ -104,7 +104,6 @@ private Response sortByManyLongs(int count) throws IOException { /** * This groups on about 200 columns which is a lot but has never caused us trouble. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") public void testGroupOnSomeLongs() throws IOException { initManyLongs(); Map map = XContentHelper.convertToMap( From 8552a928822b1cb6f2f0162d642a7fab72d3eb8e Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Thu, 5 Oct 2023 10:53:55 -0500 Subject: [PATCH 006/176] Split isAvailable check for more accurate results in stateless (#100172) This commit extends the isAvailable() check for security indices to specify which set of shards to check for availability. Prior to this change, the only supported check was whether all primary shards (as defined by cluster state) were available. In stateful ES that is sufficient, but in stateless ES there is a concept of search-only shards, which are not primary and cannot be promoted to a primary, and the primary is not searchable. There exist scenarios (usually during startup or shutdown) where the primary is available, but the search shards may not be (and the primary is not searchable). For these cases, and for better semantics, the isAvailable() check has been extended to specify whether availability is checked for search or primary workflows. Search is for search workflows, and primary is for all others (including realtime get). This check (before and after this change) is primarily an optimization to avoid unnecessary work and provide better error messages, and this commit helps make that optimization more correct. 
related: #99890 --- .../xpack/security/authc/ApiKeyService.java | 22 +-- .../security/authc/ExpiredTokenRemover.java | 5 +- .../xpack/security/authc/TokenService.java | 28 ++-- .../authc/esnative/NativeUsersStore.java | 36 ++--- .../IndexServiceAccountTokenStore.java | 14 +- .../mapper/NativeRoleMappingStore.java | 33 ++--- .../authz/store/NativePrivilegeStore.java | 14 +- .../authz/store/NativeRolesStore.java | 22 +-- .../security/profile/ProfileService.java | 23 +-- .../support/SecurityIndexManager.java | 134 ++++++++++++++---- ...ansportOpenIdConnectLogoutActionTests.java | 5 +- ...sportSamlInvalidateSessionActionTests.java | 5 +- .../saml/TransportSamlLogoutActionTests.java | 5 +- .../TransportInvalidateTokenActionTests.java | 14 +- .../user/TransportGetUsersActionTests.java | 12 +- .../security/authc/ApiKeyServiceTests.java | 2 +- .../authc/AuthenticationServiceTests.java | 11 +- .../security/authc/TokenServiceTests.java | 16 ++- .../authc/esnative/NativeRealmTests.java | 1 + .../authc/esnative/NativeUsersStoreTests.java | 5 +- .../authc/ldap/ActiveDirectoryRealmTests.java | 5 +- .../security/authc/ldap/LdapRealmTests.java | 5 +- .../IndexServiceAccountTokenStoreTests.java | 15 +- .../mapper/NativeRoleMappingStoreTests.java | 6 +- .../authz/store/CompositeRolesStoreTests.java | 1 + .../store/NativePrivilegeStoreTests.java | 6 +- .../security/profile/ProfileServiceTests.java | 4 +- .../CacheInvalidatorRegistryTests.java | 1 + .../support/SecurityIndexManagerTests.java | 105 +++++++++++++- .../xpack/security/test/SecurityMocks.java | 5 +- 30 files changed, 390 insertions(+), 170 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 3f23e62bad729..ce622ddf6fa69 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -149,6 +149,8 @@ import static org.elasticsearch.xpack.core.security.action.apikey.CrossClusterApiKeyRoleDescriptorBuilder.CCS_CLUSTER_PRIVILEGE_NAMES; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.WORKFLOWS_RESTRICTION_VERSION; import static org.elasticsearch.xpack.security.Security.SECURITY_CRYPTO_THREAD_POOL_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; public class ApiKeyService { @@ -1309,12 +1311,12 @@ public void crossClusterApiKeyUsageStats(ActionListener> lis listener.onResponse(Map.of()); return; } - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { logger.debug("security index does not exist"); listener.onResponse(Map.of("total", 0, "ccs", 0, "ccr", 0, "ccs_ccr", 0)); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); } else { final BoolQueryBuilder boolQuery = 
QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery("doc_type", "api_key")) @@ -1638,11 +1640,11 @@ private void findApiKeysForUserRealmApiKeyIdAndNameCombination( Function hitParser, ActionListener> listener ) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(Collections.emptyList()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); } else { final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("doc_type", "api_key")); QueryBuilder realmsQuery = filterForRealmNames(realmNames); @@ -1879,7 +1881,7 @@ long lastTimeWhenApiKeysRemoverWasTriggered() { } private void maybeStartApiKeyRemover() { - if (securityIndex.isAvailable()) { + if (securityIndex.isAvailable(PRIMARY_SHARDS)) { if (client.threadPool().relativeTimeInMillis() - lastExpirationRunMs > deleteInterval.getMillis()) { inactiveApiKeysRemover.submit(client.threadPool()); lastExpirationRunMs = client.threadPool().relativeTimeInMillis(); @@ -1935,12 +1937,12 @@ public void getApiKeys( public void queryApiKeys(SearchRequest searchRequest, boolean withLimitedBy, ActionListener listener) { ensureEnabled(); - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { logger.debug("security index does not exist"); listener.onResponse(QueryApiKeyResponse.emptyResponse()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute( listener::onFailure, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java index 060f53ee4d8ad..bec9e90c9f190 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java @@ -34,6 +34,7 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; /** * Responsible for cleaning the invalidated and expired tokens from the security indices (`main` and `tokens`). 
@@ -68,10 +69,10 @@ final class ExpiredTokenRemover extends AbstractRunnable { @Override public void doRun() { final List indicesWithTokens = new ArrayList<>(); - if (securityTokensIndex.isAvailable()) { + if (securityTokensIndex.isAvailable(PRIMARY_SHARDS)) { indicesWithTokens.add(securityTokensIndex.aliasName()); } - if (securityMainIndex.isAvailable() && checkMainIndexForExpiredTokens) { + if (securityMainIndex.isAvailable(PRIMARY_SHARDS) && checkMainIndexForExpiredTokens) { indicesWithTokens.add(securityMainIndex.aliasName()); } if (indicesWithTokens.isEmpty()) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index e543248b8ad1d..794bcd96a66c3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -147,6 +147,8 @@ import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; /** * Service responsible for the creation, validation, and other management of {@link UserToken} @@ -552,10 +554,10 @@ private void getTokenDocById( ActionListener listener ) { final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); - final SecurityIndexManager frozenTokensIndex = tokensIndex.freeze(); - if (frozenTokensIndex.isAvailable() == false) { + final SecurityIndexManager frozenTokensIndex = tokensIndex.defensiveCopy(); + if (frozenTokensIndex.isAvailable(PRIMARY_SHARDS) == false) { logger.warn("failed to get access token [{}] because index [{}] is not available", tokenId, tokensIndex.aliasName()); - listener.onFailure(frozenTokensIndex.getUnavailableReason()); + listener.onFailure(frozenTokensIndex.getUnavailableReason(PRIMARY_SHARDS)); return; } final GetRequest getRequest = client.prepareGet(tokensIndex.aliasName(), getTokenDocumentId(tokenId)).request(); @@ -1168,13 +1170,13 @@ private void findTokenFromRefreshToken( onFailure.accept(ex); } }; - final SecurityIndexManager frozenTokensIndex = tokensIndexManager.freeze(); + final SecurityIndexManager frozenTokensIndex = tokensIndexManager.defensiveCopy(); if (frozenTokensIndex.indexExists() == false) { logger.warn("index [{}] does not exist so we can't find token from refresh token", frozenTokensIndex.aliasName()); - listener.onFailure(frozenTokensIndex.getUnavailableReason()); - } else if (frozenTokensIndex.isAvailable() == false) { + listener.onFailure(new IndexNotFoundException(frozenTokensIndex.aliasName())); + } else if (frozenTokensIndex.isAvailable(SEARCH_SHARDS) == false) { logger.debug("index [{}] is not available to find token from refresh token, retrying", frozenTokensIndex.aliasName()); - maybeRetryOnFailure.accept(frozenTokensIndex.getUnavailableReason()); + maybeRetryOnFailure.accept(frozenTokensIndex.getUnavailableReason(SEARCH_SHARDS)); } else { final SearchRequest request = client.prepareSearch(tokensIndexManager.aliasName()) .setQuery( @@ -1786,11 +1788,11 @@ private void searchActiveTokens( */ private void 
sourceIndicesWithTokensAndRun(ActionListener> listener) { final List indicesWithTokens = new ArrayList<>(2); - final SecurityIndexManager frozenTokensIndex = securityTokensIndex.freeze(); + final SecurityIndexManager frozenTokensIndex = securityTokensIndex.defensiveCopy(); if (frozenTokensIndex.indexExists()) { // an existing tokens index always contains tokens (if available and version allows) - if (false == frozenTokensIndex.isAvailable()) { - listener.onFailure(frozenTokensIndex.getUnavailableReason()); + if (false == frozenTokensIndex.isAvailable(SEARCH_SHARDS)) { + listener.onFailure(frozenTokensIndex.getUnavailableReason(SEARCH_SHARDS)); return; } if (false == frozenTokensIndex.isIndexUpToDate()) { @@ -1806,14 +1808,14 @@ private void sourceIndicesWithTokensAndRun(ActionListener> listener } indicesWithTokens.add(frozenTokensIndex.aliasName()); } - final SecurityIndexManager frozenMainIndex = securityMainIndex.freeze(); + final SecurityIndexManager frozenMainIndex = securityMainIndex.defensiveCopy(); if (frozenMainIndex.indexExists()) { // main security index _might_ contain tokens if the tokens index has been created recently if (false == frozenTokensIndex.indexExists() || frozenTokensIndex.getCreationTime() .isAfter(clock.instant().minus(ExpiredTokenRemover.MAXIMUM_TOKEN_LIFETIME_HOURS, ChronoUnit.HOURS))) { - if (false == frozenMainIndex.isAvailable()) { - listener.onFailure(frozenMainIndex.getUnavailableReason()); + if (false == frozenMainIndex.isAvailable(SEARCH_SHARDS)) { + listener.onFailure(frozenMainIndex.getUnavailableReason(SEARCH_SHARDS)); return; } if (false == frozenMainIndex.isIndexUpToDate()) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 76029b779d8d9..36f78682b6bd1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -64,6 +64,8 @@ import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; /** @@ -118,11 +120,11 @@ public void getUsers(String[] userNames, final ActionListener> listener.onFailure(t); }; - final SecurityIndexManager frozenSecurityIndex = this.securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = this.securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(Collections.emptyList()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else if (userNames.length == 1) { // optimization for single user lookup final String username = userNames[0]; getUserAndPassword( @@ -160,11 +162,11 @@ public void getUsers(String[] userNames, 
final ActionListener> } void getUserCount(final ActionListener listener) { - final SecurityIndexManager frozenSecurityIndex = this.securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = this.securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(0L); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute( listener::onFailure, @@ -187,8 +189,8 @@ void getUserCount(final ActionListener listener) { * Async method to retrieve a user and their password */ private void getUserAndPassword(final String user, final ActionListener listener) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); - if (frozenSecurityIndex.isAvailable() == false) { + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); + if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { if (frozenSecurityIndex.indexExists() == false) { logger.trace("could not retrieve user [{}] because security index does not exist", user); } else { @@ -537,11 +539,11 @@ private void setReservedUserEnabled( } public void deleteUser(final DeleteUserRequest deleteUserRequest, final ActionListener listener) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(false); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { DeleteRequest request = client.prepareDelete(SECURITY_MAIN_ALIAS, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())) @@ -595,11 +597,11 @@ void verifyPassword(String username, final SecureString password, ActionListener } void getReservedUserInfo(String username, ActionListener listener) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(null); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute( listener::onFailure, @@ -648,11 +650,11 @@ public void onFailure(Exception e) { } void getAllReservedUserInfo(ActionListener> listener) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(Collections.emptyMap()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + 
listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute( listener::onFailure, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java index 07651e0272df4..a16eeb7cf4e0f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java @@ -65,6 +65,8 @@ import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; public class IndexServiceAccountTokenStore extends CachingServiceAccountTokenStore { @@ -168,11 +170,11 @@ void createToken( } void findTokensFor(ServiceAccountId accountId, ActionListener> listener) { - final SecurityIndexManager frozenSecurityIndex = this.securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = this.securityIndex.defensiveCopy(); if (false == frozenSecurityIndex.indexExists()) { listener.onResponse(List.of()); - } else if (false == frozenSecurityIndex.isAvailable()) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (false == frozenSecurityIndex.isAvailable(SEARCH_SHARDS)) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { final Supplier contextSupplier = client.threadPool() @@ -204,11 +206,11 @@ void findTokensFor(ServiceAccountId accountId, ActionListener listener) { - final SecurityIndexManager frozenSecurityIndex = this.securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = this.securityIndex.defensiveCopy(); if (false == frozenSecurityIndex.indexExists()) { listener.onResponse(false); - } else if (false == frozenSecurityIndex.isAvailable()) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (false == frozenSecurityIndex.isAvailable(PRIMARY_SHARDS)) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else { final ServiceAccountId accountId = new ServiceAccountId(request.getNamespace(), request.getServiceName()); if (false == ServiceAccountService.isServiceAccountPrincipal(accountId.asPrincipal())) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index f49558ad6875d..18d9070d08a33 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -62,6 +62,8 @@ import static 
org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isIndexDeleted; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isMoveFromRedToNonRed; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; @@ -245,11 +247,11 @@ public void onFailure(Exception e) { } private void innerDeleteMapping(DeleteRoleMappingRequest request, ActionListener listener) { - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(false); - } else if (securityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (securityIndex.isAvailable(PRIMARY_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin( @@ -293,19 +295,18 @@ public void getRoleMappings(Set names, ActionListener> listener) { - if (securityIndex.isAvailable()) { - loadMappings(listener); - } else { - logger.info("The security index is not yet available - no role mappings can be loaded"); - if (logger.isDebugEnabled()) { - logger.debug( - "Security Index [{}] [exists: {}] [available: {}]", - SECURITY_MAIN_ALIAS, - securityIndex.indexExists(), - securityIndex.isAvailable() - ); - } + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); + if (frozenSecurityIndex.indexExists() == false) { + logger.debug("The security index does not exist - no role mappings can be loaded"); + listener.onResponse(Collections.emptyList()); + } else if (frozenSecurityIndex.indexIsClosed()) { + logger.debug("The security index exists but is closed - no role mappings can be loaded"); listener.onResponse(Collections.emptyList()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + logger.debug("The security index exists but is not available - no role mappings can be loaded"); + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); + } else { + loadMappings(listener); } } @@ -319,7 +320,7 @@ private void getMappings(ActionListener> listener) { * */ public void usageStats(ActionListener> listener) { - if (securityIndex.isAvailable() == false) { + if (securityIndex.isAvailable(SEARCH_SHARDS) == false) { reportStats(listener, Collections.emptyList()); } else { getMappings(ActionListener.wrap(mappings -> reportStats(listener, mappings), listener::onFailure)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index 10f5539b953b6..0e509c8af26b0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -74,6 +74,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor.DOC_TYPE_VALUE; import static org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor.Fields.APPLICATION; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; /** @@ -189,11 +191,11 @@ public void getPrivileges( private void innerGetPrivileges(Collection applications, ActionListener> listener) { assert applications != null && applications.size() > 0 : "Application names are required (found " + applications + ")"; - final SecurityIndexManager frozenSecurityIndex = securityIndexManager.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndexManager.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(Collections.emptyList()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); } else { securityIndexManager.checkIndexVersionThenExecute(listener::onFailure, () -> { @@ -419,11 +421,11 @@ public void deletePrivileges( WriteRequest.RefreshPolicy refreshPolicy, ActionListener>> listener ) { - final SecurityIndexManager frozenSecurityIndex = securityIndexManager.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndexManager.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(Collections.emptyMap()); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else { securityIndexManager.checkIndexVersionThenExecute(listener::onFailure, () -> { ActionListener groupListener = new GroupedActionListener<>(names.size(), ActionListener.wrap(responses -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index 085863fdb5e31..31875655a7ee9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -69,6 +69,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ROLE_TYPE; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static 
org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; /** @@ -133,12 +135,12 @@ public void getRoleDescriptors(Set names, final ActionListener { QueryBuilder query = QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE); @@ -208,11 +210,11 @@ public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionLi return; } - final SecurityIndexManager frozenSecurityIndex = securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { listener.onResponse(false); - } else if (frozenSecurityIndex.isAvailable() == false) { - listener.onFailure(frozenSecurityIndex.getUnavailableReason()); + } else if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS)); } else { securityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { DeleteRequest request = client.prepareDelete(SECURITY_MAIN_ALIAS, getIdForRole(deleteRoleRequest.name())).request(); @@ -309,7 +311,7 @@ public void onFailure(Exception e) { public void usageStats(ActionListener> listener) { Map usageStats = Maps.newMapWithExpectedSize(3); - if (securityIndex.isAvailable() == false) { + if (securityIndex.isAvailable(SEARCH_SHARDS) == false) { usageStats.put("size", 0L); usageStats.put("fls", false); usageStats.put("dls", false); @@ -406,12 +408,12 @@ public String toString() { } private void getRoleDescriptor(final String roleId, ActionListener resultListener) { - final SecurityIndexManager frozenSecurityIndex = this.securityIndex.freeze(); + final SecurityIndexManager frozenSecurityIndex = this.securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! 
resultListener.onResponse(RoleRetrievalResult.success(Collections.emptySet())); - } else if (frozenSecurityIndex.isAvailable() == false) { - resultListener.onResponse(RoleRetrievalResult.failure(frozenSecurityIndex.getUnavailableReason())); + } else if (frozenSecurityIndex.isAvailable(PRIMARY_SHARDS) == false) { + resultListener.onResponse(RoleRetrievalResult.failure(frozenSecurityIndex.getUnavailableReason(PRIMARY_SHARDS))); } else { securityIndex.checkIndexVersionThenExecute( e -> resultListener.onResponse(RoleRetrievalResult.failure(e)), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java index 7be1d0f96c043..054583d94cbb1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java @@ -99,6 +99,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_PROFILE_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.security.authc.Authentication.isFileOrNativeRealm; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.PRIMARY_SHARDS; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.Availability.SEARCH_SHARDS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ALIAS; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.VERSION_SECURITY_PROFILE_ORIGIN; @@ -261,7 +263,7 @@ public void suggestProfile(SuggestProfilesRequest request, TaskId parentTaskId, 0, new TotalHits(0, TotalHits.Relation.EQUAL_TO) ); - })).ifPresent(frozenProfileIndex -> { + }), SEARCH_SHARDS).ifPresent(frozenProfileIndex -> { final SearchRequest searchRequest = buildSearchRequestForSuggest(request, parentTaskId); frozenProfileIndex.checkIndexVersionThenExecute( @@ -334,7 +336,7 @@ public void usageStats(ActionListener> listener) { tryFreezeAndCheckIndex(listener.map(response -> { // index does not exist assert response == null : "only null response can reach here"; return Map.of("total", 0L, "enabled", 0L, "recent", 0L); - })).ifPresent(frozenProfileIndex -> { + }), SEARCH_SHARDS).ifPresent(frozenProfileIndex -> { final MultiSearchRequest multiSearchRequest = client.prepareMultiSearch() .add( client.prepareSearch(SECURITY_PROFILE_ALIAS) @@ -445,7 +447,7 @@ SearchRequest buildSearchRequestForSuggest(SuggestProfilesRequest request, TaskI } private void getVersionedDocument(String uid, ActionListener listener) { - tryFreezeAndCheckIndex(listener).ifPresent(frozenProfileIndex -> { + tryFreezeAndCheckIndex(listener, PRIMARY_SHARDS).ifPresent(frozenProfileIndex -> { final GetRequest getRequest = new GetRequest(SECURITY_PROFILE_ALIAS, uidToDocId(uid)); frozenProfileIndex.checkIndexVersionThenExecute( listener::onFailure, @@ -472,7 +474,7 @@ private void getVersionedDocuments(Collection uids, ActionListener { + tryFreezeAndCheckIndex(listener, PRIMARY_SHARDS).ifPresent(frozenProfileIndex -> { frozenProfileIndex.checkIndexVersionThenExecute( listener::onFailure, () -> new OriginSettingClient(client, getActionOrigin()).prepareMultiGet() @@ -544,7 +546,7 @@ private void searchVersionedDocumentsForSubjects( listener.onResponse(new SubjectSearchResultsAndErrors<>(List.of(), Map.of())); return; } - 
tryFreezeAndCheckIndex(listener).ifPresent(frozenProfileIndex -> { + tryFreezeAndCheckIndex(listener, SEARCH_SHARDS).ifPresent(frozenProfileIndex -> { frozenProfileIndex.checkIndexVersionThenExecute(listener::onFailure, () -> { final MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); subjects.forEach(subject -> multiSearchRequest.add(buildSearchRequestForSubject(subject))); @@ -1006,14 +1008,17 @@ private static XContentBuilder wrapProfileDocumentWithoutApplicationData(Profile * Freeze the profile index check its availability and return it if everything is ok. * Otherwise it calls the listener with null and returns an empty Optional. */ - private Optional tryFreezeAndCheckIndex(ActionListener listener) { - final SecurityIndexManager frozenProfileIndex = profileIndex.freeze(); + private Optional tryFreezeAndCheckIndex( + ActionListener listener, + SecurityIndexManager.Availability availability + ) { + final SecurityIndexManager frozenProfileIndex = profileIndex.defensiveCopy(); if (false == frozenProfileIndex.indexExists()) { logger.debug("profile index does not exist"); listener.onResponse(null); return Optional.empty(); - } else if (false == frozenProfileIndex.isAvailable()) { - listener.onFailure(frozenProfileIndex.getUnavailableReason()); + } else if (false == frozenProfileIndex.isAvailable(availability)) { + listener.onFailure(frozenProfileIndex.getUnavailableReason(availability)); return Optional.empty(); } return Optional.of(frozenProfileIndex); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index bdc25098f1760..62bb20322f185 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.Tuple; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -67,31 +68,45 @@ public class SecurityIndexManager implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(SecurityIndexManager.class); + /** + * When checking availability, check for availability of search or availability of all primaries + **/ + public enum Availability { + SEARCH_SHARDS, + PRIMARY_SHARDS + } + private final Client client; private final SystemIndexDescriptor systemIndexDescriptor; private final List> stateChangeListeners = new CopyOnWriteArrayList<>(); private volatile State state; + private final boolean defensiveCopy; public static SecurityIndexManager buildSecurityIndexManager( Client client, ClusterService clusterService, SystemIndexDescriptor descriptor ) { - final SecurityIndexManager securityIndexManager = new SecurityIndexManager(client, descriptor, State.UNRECOVERED_STATE); + final SecurityIndexManager securityIndexManager = new SecurityIndexManager(client, descriptor, State.UNRECOVERED_STATE, false); clusterService.addListener(securityIndexManager); return securityIndexManager; } - private SecurityIndexManager(Client client, SystemIndexDescriptor descriptor, State state) { + private SecurityIndexManager(Client client, 
SystemIndexDescriptor descriptor, State state, boolean defensiveCopy) { this.client = client; this.state = state; this.systemIndexDescriptor = descriptor; + this.defensiveCopy = defensiveCopy; } - public SecurityIndexManager freeze() { - return new SecurityIndexManager(null, systemIndexDescriptor, state); + /** + * Creates a defensive copy to protect against the underlying state changes. Should be called prior to making decisions and that same copy + should be reused for multiple checks in the same workflow. + */ + public SecurityIndexManager defensiveCopy() { + return new SecurityIndexManager(null, systemIndexDescriptor, state, true); } public String aliasName() { @@ -102,6 +117,10 @@ public boolean indexExists() { return this.state.indexExists(); } + public boolean indexIsClosed() { + return this.state.indexState == IndexMetadata.State.CLOSE; + } + public Instant getCreationTime() { return this.state.creationTime; } @@ -114,8 +133,27 @@ public boolean isIndexUpToDate() { return this.state.isIndexUpToDate; } - public boolean isAvailable() { - return this.state.indexAvailable; + /** + * Optimization to avoid making unnecessary calls when we know the underlying shard state. This call will check that the index exists, + * is discoverable from the alias, is not closed, and will determine if available based on the {@link Availability} parameter. + * @param availability Check availability for search or write/update/real-time get workflows. Write/update/real-time get workflows + * should check for availability of primary shards. Search workflows should check availability of search shards + * (which may or may not also be the primary shards). + * @return + * when checking for search: true if all searchable shards for the security index are available + * when checking for primary: true if all primary shards for the security index are available + */ + public boolean isAvailable(Availability availability) { + switch (availability) { + case SEARCH_SHARDS -> { + return this.state.indexAvailableForSearch; + } + case PRIMARY_SHARDS -> { + return this.state.indexAvailableForWrite; + } + } + // can never happen + throw new IllegalStateException("Unexpected availability enumeration. This is a bug, please contact support."); } public boolean isMappingUpToDate() { @@ -126,19 +164,34 @@ public boolean isStateRecovered() { return this.state != State.UNRECOVERED_STATE; } - public ElasticsearchException getUnavailableReason() { - final State state = this.state; // use a local copy so all checks execute against the same state! - if (state.indexAvailable) { - throw new IllegalStateException("caller must make sure to use a frozen state and check indexAvailable"); + public ElasticsearchException getUnavailableReason(Availability availability) { + // ensure usage of a local copy so all checks execute against the same state!
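+ // A minimal sketch of the calling pattern this guard expects, mirroring tryFreezeAndCheckIndex in ProfileService above (the securityIndex and listener names are illustrative): + // SecurityIndexManager frozen = securityIndex.defensiveCopy(); + // if (frozen.isAvailable(Availability.SEARCH_SHARDS) == false) { listener.onFailure(frozen.getUnavailableReason(Availability.SEARCH_SHARDS)); }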
+ if (defensiveCopy == false) { + throw new IllegalStateException("caller must make sure to use a defensive copy"); } - + final State state = this.state; if (state.indexState == IndexMetadata.State.CLOSE) { return new IndexClosedException(new Index(state.concreteIndexName, ClusterState.UNKNOWN_UUID)); } else if (state.indexExists()) { - return new UnavailableShardsException( - null, - "at least one primary shard for the index [" + state.concreteIndexName + "] is unavailable" - ); + assert state.indexAvailableForSearch == false || state.indexAvailableForWrite == false; + if (Availability.PRIMARY_SHARDS.equals(availability) && state.indexAvailableForWrite == false) { + return new UnavailableShardsException( + null, + "at least one primary shard for the index [" + state.concreteIndexName + "] is unavailable" + ); + } else if (Availability.SEARCH_SHARDS.equals(availability) && state.indexAvailableForSearch == false) { + // The current behavior is that when primaries are unavailable and replicas cannot be promoted, then + // any replicas will be marked as unavailable as well. This is applicable in stateless, where there are index-only primaries + // with non-promotable replicas (i.e. search-only shards). In that case "at least one search ... is unavailable" is + // a technically correct statement, but the shard may be unavailable because it is not promotable and the primary is unavailable + return new UnavailableShardsException( + null, + "at least one search shard for the index [" + state.concreteIndexName + "] is unavailable" + ); + } else { + // should never happen + throw new IllegalStateException("caller must ensure original availability matches the current availability"); + } } else { return new IndexNotFoundException(state.concreteIndexName); } @@ -174,7 +227,9 @@ public void clusterChanged(ClusterChangedEvent event) { final Instant creationTime = indexMetadata != null ?
Instant.ofEpochMilli(indexMetadata.getCreationDate()) : null; final boolean isIndexUpToDate = indexMetadata == null || INDEX_FORMAT_SETTING.get(indexMetadata.getSettings()) == systemIndexDescriptor.getIndexFormat(); - final boolean indexAvailable = checkIndexAvailable(event.state()); + Tuple<Boolean, Boolean> available = checkIndexAvailable(event.state()); + final boolean indexAvailableForWrite = available.v1(); + final boolean indexAvailableForSearch = available.v2(); final boolean mappingIsUpToDate = indexMetadata == null || checkIndexMappingUpToDate(event.state()); final Version mappingVersion = oldestIndexMappingVersion(event.state()); final String concreteIndexName = indexMetadata == null @@ -199,7 +254,8 @@ public void clusterChanged(ClusterChangedEvent event) { final State newState = new State( creationTime, isIndexUpToDate, - indexAvailable, + indexAvailableForSearch, + indexAvailableForWrite, mappingIsUpToDate, mappingVersion, concreteIndexName, @@ -230,24 +286,35 @@ public void onStateRecovered(Consumer recoveredStateConsumer) { stateChangeListeners.add(stateChangeListener); } - private boolean checkIndexAvailable(ClusterState state) { + private Tuple<Boolean, Boolean> checkIndexAvailable(ClusterState state) { final String aliasName = systemIndexDescriptor.getAliasName(); IndexMetadata metadata = resolveConcreteIndex(aliasName, state.metadata()); if (metadata == null) { logger.debug("Index [{}] is not available - no metadata", aliasName); - return false; + return new Tuple<>(false, false); } if (metadata.getState() == IndexMetadata.State.CLOSE) { logger.warn("Index [{}] is closed", aliasName); - return false; + return new Tuple<>(false, false); } + boolean allPrimaryShards = false; + boolean searchShards = false; final IndexRoutingTable routingTable = state.routingTable().index(metadata.getIndex()); - if (routingTable == null || routingTable.allPrimaryShardsActive() == false) { - logger.debug("Index [{}] is not yet active", aliasName); - return false; - } else { - return true; + if (routingTable != null && routingTable.allPrimaryShardsActive()) { + allPrimaryShards = true; + } + if (routingTable != null && routingTable.readyForSearch(state)) { + searchShards = true; + } + if (allPrimaryShards == false || searchShards == false) { + logger.debug( + "Index [{}] is not fully available. All primary shards available [{}], search shards available [{}]", + aliasName, + allPrimaryShards, + searchShards + ); } + return new Tuple<>(allPrimaryShards, searchShards); } private boolean checkIndexMappingUpToDate(ClusterState clusterState) { @@ -482,10 +549,11 @@ public static boolean isIndexDeleted(State previousState, State currentState) { * State of the security index.
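 * Note: availability is now carried by two adjacent boolean parameters; by constructor convention (a sketch of the call in clusterChanged above, trailing arguments elided) the order is: new State(creationTime, isIndexUpToDate, indexAvailableForSearch, indexAvailableForWrite, mappingUpToDate, ...)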
*/ public static class State { - public static final State UNRECOVERED_STATE = new State(null, false, false, false, null, null, null, null, null, null); + public static final State UNRECOVERED_STATE = new State(null, false, false, false, false, null, null, null, null, null, null); public final Instant creationTime; public final boolean isIndexUpToDate; - public final boolean indexAvailable; + public final boolean indexAvailableForSearch; + public final boolean indexAvailableForWrite; public final boolean mappingUpToDate; public final Version mappingVersion; public final String concreteIndexName; @@ -497,7 +565,8 @@ public static class State { public State( Instant creationTime, boolean isIndexUpToDate, - boolean indexAvailable, + boolean indexAvailableForSearch, + boolean indexAvailableForWrite, boolean mappingUpToDate, Version mappingVersion, String concreteIndexName, @@ -508,7 +577,8 @@ public State( ) { this.creationTime = creationTime; this.isIndexUpToDate = isIndexUpToDate; - this.indexAvailable = indexAvailable; + this.indexAvailableForSearch = indexAvailableForSearch; + this.indexAvailableForWrite = indexAvailableForWrite; this.mappingUpToDate = mappingUpToDate; this.mappingVersion = mappingVersion; this.concreteIndexName = concreteIndexName; @@ -525,7 +595,8 @@ public boolean equals(Object o) { State state = (State) o; return Objects.equals(creationTime, state.creationTime) && isIndexUpToDate == state.isIndexUpToDate - && indexAvailable == state.indexAvailable + && indexAvailableForSearch == state.indexAvailableForSearch + && indexAvailableForWrite == state.indexAvailableForWrite && mappingUpToDate == state.mappingUpToDate && Objects.equals(mappingVersion, state.mappingVersion) && Objects.equals(concreteIndexName, state.concreteIndexName) @@ -543,7 +614,8 @@ public int hashCode() { return Objects.hash( creationTime, isIndexUpToDate, - indexAvailable, + indexAvailableForSearch, + indexAvailableForWrite, mappingUpToDate, mappingVersion, concreteIndexName, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java index 61aa0e22fd905..2a6fad9c81f53 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java @@ -173,8 +173,9 @@ public void setup() throws Exception { ((Runnable) inv.getArguments()[1]).run(); return null; }).when(securityIndex).checkIndexVersionThenExecute(anyConsumer(), any(Runnable.class)); - when(securityIndex.isAvailable()).thenReturn(true); - when(securityIndex.freeze()).thenReturn(securityIndex); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 
f65dda28be125..a748de0c89413 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -263,12 +263,13 @@ protected void ((Runnable) inv.getArguments()[1]).run(); return null; }).when(securityIndex).checkIndexVersionThenExecute(anyConsumer(), any(Runnable.class)); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); when(securityIndex.isIndexUpToDate()).thenReturn(true); when(securityIndex.getCreationTime()).thenReturn(Clock.systemUTC().instant()); when(securityIndex.aliasName()).thenReturn(".security"); - when(securityIndex.freeze()).thenReturn(securityIndex); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); final MockLicenseState licenseState = mock(MockLicenseState.class); when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 9020d45041cea..e3631a785b9f3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -204,8 +204,9 @@ public void setup() throws Exception { ((Runnable) inv.getArguments()[1]).run(); return null; }).when(securityIndex).checkIndexVersionThenExecute(any(Consumer.class), any(Runnable.class)); - when(securityIndex.isAvailable()).thenReturn(true); - when(securityIndex.freeze()).thenReturn(securityIndex); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); final MockLicenseState licenseState = mock(MockLicenseState.class); when(licenseState.isAllowed(Security.TOKEN_SERVICE_FEATURE)).thenReturn(true); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java index a0f7892c3319d..6b9594c1c68ea 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/token/TransportInvalidateTokenActionTests.java @@ -78,10 +78,12 @@ public void setup() { } public void testInvalidateTokensWhenIndexUnavailable() throws Exception { - when(securityIndex.isAvailable()).thenReturn(false); + + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(false); when(securityIndex.indexExists()).thenReturn(true); - when(securityIndex.freeze()).thenReturn(securityIndex); - when(securityIndex.getUnavailableReason()).thenReturn(new 
ElasticsearchException("simulated")); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); + when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)) + .thenReturn(new ElasticsearchException("simulated")); final TokenService tokenService = new TokenService( SETTINGS, Clock.systemUTC(), @@ -122,10 +124,10 @@ public void testInvalidateTokensWhenIndexUnavailable() throws Exception { } public void testInvalidateTokensWhenIndexClosed() throws Exception { - when(securityIndex.isAvailable()).thenReturn(false); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); when(securityIndex.indexExists()).thenReturn(true); - when(securityIndex.freeze()).thenReturn(securityIndex); - when(securityIndex.getUnavailableReason()).thenReturn( + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); + when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn( new IndexClosedException(new Index(INTERNAL_SECURITY_TOKENS_INDEX_7, ClusterState.UNKNOWN_UUID)) ); final TokenService tokenService = new TokenService( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java index 00f478f68b6ba..b6a1523b09784 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -110,7 +110,8 @@ public void terminateThreadPool() throws InterruptedException { public void testAnonymousUser() { NativeUsersStore usersStore = mock(NativeUsersStore.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); AnonymousUser anonymousUser = new AnonymousUser(settings); ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser, threadPool); reservedRealm.initRealmRef( @@ -183,7 +184,8 @@ public void onFailure(Exception e) { public void testReservedUsersOnly() { NativeUsersStore usersStore = mock(NativeUsersStore.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); ReservedRealm reservedRealm = new ReservedRealm( @@ -272,7 +274,8 @@ public void testGetAllUsers() { ); NativeUsersStore usersStore = mock(NativeUsersStore.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); ReservedRealm 
reservedRealm = new ReservedRealm( mock(Environment.class), @@ -377,7 +380,8 @@ public void testGetUsersWithProfileUidException() { ); NativeUsersStore usersStore = mock(NativeUsersStore.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); ReservedRealm reservedRealm = new ReservedRealm( mock(Environment.class), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 117d1f1fe14bb..a0a1b622cf36e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -877,7 +877,7 @@ public void testCrossClusterApiKeyUsageStatsAreZerosWhenIndexDoesNotExist() { public void testCrossClusterApiKeyUsageFailsWhenIndexNotAvailable() { securityIndex = SecurityMocks.mockSecurityIndexManager(".security", true, false); final ElasticsearchException expectedException = new ElasticsearchException("not available"); - when(securityIndex.getUnavailableReason()).thenReturn(expectedException); + when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(expectedException); final ApiKeyService apiKeyService = createApiKeyService(); final PlainActionFuture> future = new PlainActionFuture<>(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index cb4bbc383764c..cf343f790d85c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -1909,8 +1909,9 @@ public void testAuthenticateWithToken() throws Exception { String token = tokenFuture.get().getAccessToken(); when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), expected, Map.of(), false, null, client); - when(securityIndex.freeze()).thenReturn(securityIndex); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { threadContext.putHeader("Authorization", "Bearer " + token); @@ -2014,8 +2015,9 @@ public void testInvalidToken() throws Exception { } public void testExpiredToken() throws Exception { - when(securityIndex.freeze()).thenReturn(securityIndex); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); + 
when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); User user = new User("_username", "r1"); final Authentication expected = AuthenticationTestHelper.builder() @@ -2501,6 +2503,7 @@ private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { true, true, true, + true, null, concreteSecurityIndexName, indexStatus, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 4c276993381b5..35335fd5e4a53 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -922,14 +922,14 @@ public void testIndexNotAvailable() throws Exception { final SecurityIndexManager tokensIndex; if (pre72OldNode != null) { tokensIndex = securityMainIndex; - when(securityTokensIndex.isAvailable()).thenReturn(false); + when(securityTokensIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); when(securityTokensIndex.indexExists()).thenReturn(false); - when(securityTokensIndex.freeze()).thenReturn(securityTokensIndex); + when(securityTokensIndex.defensiveCopy()).thenReturn(securityTokensIndex); } else { tokensIndex = securityTokensIndex; - when(securityMainIndex.isAvailable()).thenReturn(false); + when(securityMainIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); when(securityMainIndex.indexExists()).thenReturn(false); - when(securityMainIndex.freeze()).thenReturn(securityMainIndex); + when(securityMainIndex.defensiveCopy()).thenReturn(securityMainIndex); } try (ThreadContext.StoredContext ignore = requestContext.newStoredContextPreservingResponseHeaders()) { PlainActionFuture future = new PlainActionFuture<>(); @@ -937,8 +937,10 @@ public void testIndexNotAvailable() throws Exception { tokenService.tryAuthenticateToken(bearerToken3, future); assertNull(future.get()); - when(tokensIndex.isAvailable()).thenReturn(false); - when(tokensIndex.getUnavailableReason()).thenReturn(new UnavailableShardsException(null, "unavailable")); + when(tokensIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); + when(tokensIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn( + new UnavailableShardsException(null, "unavailable") + ); when(tokensIndex.indexExists()).thenReturn(true); future = new PlainActionFuture<>(); final SecureString bearerToken2 = Authenticator.extractBearerTokenFromHeader(requestContext); @@ -951,7 +953,7 @@ public void testIndexNotAvailable() throws Exception { tokenService.tryAuthenticateToken(bearerToken1, future); assertNull(future.get()); - when(tokensIndex.isAvailable()).thenReturn(true); + when(tokensIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); when(tokensIndex.indexExists()).thenReturn(true); mockGetTokenFromAccessTokenBytes(tokenService, newTokenBytes.v1(), authentication, false, null); future = new PlainActionFuture<>(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java index 7dc5af1717fda..b9cc599609ea1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmTests.java @@ -37,6 +37,7 @@ private SecurityIndexManager.State dummyState(ClusterHealthStatus indexStatus) { true, true, true, + true, null, concreteSecurityIndexName, indexStatus, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java index 5f195477d57a0..4e364518bb7f3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -317,10 +317,11 @@ private void respondToGetUserRequest(String username, SecureString password, Str @SuppressWarnings("unchecked") private NativeUsersStore startNativeUsersStore() { SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); when(securityIndex.isIndexUpToDate()).thenReturn(true); - when(securityIndex.freeze()).thenReturn(securityIndex); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); doAnswer((i) -> { Runnable action = (Runnable) i.getArguments()[1]; action.run(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java index afc6d5c17e135..e9af65bd8fc4a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java @@ -423,8 +423,11 @@ public void testRealmWithTemplatedRoleMapping() throws Exception { ActiveDirectorySessionFactory sessionFactory = new ActiveDirectorySessionFactory(config, sslService, threadPool); SecurityIndexManager mockSecurityIndex = mock(SecurityIndexManager.class); - when(mockSecurityIndex.isAvailable()).thenReturn(true); + when(mockSecurityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(mockSecurityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); when(mockSecurityIndex.isIndexUpToDate()).thenReturn(true); + when(mockSecurityIndex.indexExists()).thenReturn(true); + when(mockSecurityIndex.defensiveCopy()).thenReturn(mockSecurityIndex); Client mockClient = mock(Client.class); when(mockClient.threadPool()).thenReturn(threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java index 34c9a7e3b0b0f..9bbf4dd312d27 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -490,8 +490,11 @@ public void testLdapRealmWithTemplatedRoleMapping() throws Exception { RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings); SecurityIndexManager mockSecurityIndex = mock(SecurityIndexManager.class); - when(mockSecurityIndex.isAvailable()).thenReturn(true); + when(mockSecurityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(mockSecurityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); when(mockSecurityIndex.isIndexUpToDate()).thenReturn(true); + when(mockSecurityIndex.defensiveCopy()).thenReturn(mockSecurityIndex); + when(mockSecurityIndex.indexExists()).thenReturn(true); Client mockClient = mock(Client.class); when(mockClient.threadPool()).thenReturn(threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java index f536a696a8e23..2dec4eb8ea2b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java @@ -132,10 +132,11 @@ protected void cacheInvalidatorRegistry = mock(CacheInvalidatorRegistry.class); securityIndex = mock(SecurityIndexManager.class); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); when(securityIndex.isIndexUpToDate()).thenReturn(true); - when(securityIndex.freeze()).thenReturn(securityIndex); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); doAnswer((i) -> { Runnable action = (Runnable) i.getArguments()[1]; action.run(); @@ -375,7 +376,7 @@ public void testDeleteToken() { public void testIndexStateIssues() { // Index not exists Mockito.reset(securityIndex); - when(securityIndex.freeze()).thenReturn(securityIndex); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); when(securityIndex.indexExists()).thenReturn(false); final ServiceAccountId accountId = new ServiceAccountId(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)); @@ -394,11 +395,13 @@ public void testIndexStateIssues() { // Index exists but not available Mockito.reset(securityIndex); - when(securityIndex.freeze()).thenReturn(securityIndex); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); when(securityIndex.indexExists()).thenReturn(true); - when(securityIndex.isAvailable()).thenReturn(false); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(false); final ElasticsearchException e = new ElasticsearchException("fail"); - when(securityIndex.getUnavailableReason()).thenReturn(e); + when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(e); + 
when(securityIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(e); final PlainActionFuture> future3 = new PlainActionFuture<>(); store.findTokensFor(accountId, future3); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 1a0634f3234a6..16ef229ed5436 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -125,7 +125,10 @@ public void testResolveRoles() throws Exception { ScriptModule.CORE_CONTEXTS, () -> 1L ); - when(securityIndex.isAvailable()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); + when(securityIndex.indexExists()).thenReturn(true); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, securityIndex, scriptService) { @Override @@ -190,6 +193,7 @@ private SecurityIndexManager.State indexState(boolean isUpToDate, ClusterHealthS isUpToDate, true, true, + true, null, concreteSecurityIndexName, healthStatus, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index f30fb242abc13..46a78f1055a6f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -1527,6 +1527,7 @@ public SecurityIndexManager.State dummyIndexState(boolean isIndexUpToDate, Clust isIndexUpToDate, true, true, + true, null, concreteSecurityIndexName, healthStatus, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 10ed5c66f3c15..01d3ca6db354e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -126,9 +126,10 @@ public void searchScroll(SearchScrollRequest request, ActionListener { assertThat(invocationOnMock.getArguments().length, equalTo(2)); assertThat(invocationOnMock.getArguments()[1], instanceOf(Runnable.class)); @@ -976,6 +977,7 @@ private SecurityIndexManager.State dummyState( isIndexUpToDate, true, true, + true, null, concreteSecurityIndexName, healthStatus, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java index c9346ba488838..35efb12b278f2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/profile/ProfileServiceTests.java @@ -297,8 +297,8 @@ public void testGetProfileSubjectsNoIndex() throws Exception { assertThat(resultsAndErrors.errors().size(), is(0)); when(profileIndex.indexExists()).thenReturn(true); ElasticsearchException unavailableException = new ElasticsearchException("mock profile index unavailable"); - when(profileIndex.isAvailable()).thenReturn(false); - when(profileIndex.getUnavailableReason()).thenReturn(unavailableException); + when(profileIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(false); + when(profileIndex.getUnavailableReason(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(unavailableException); PlainActionFuture>> future2 = new PlainActionFuture<>(); profileService.getProfileSubjects(randomList(1, 5, () -> randomAlphaOfLength(20)), future2); ExecutionException e = expectThrows(ExecutionException.class, () -> future2.get()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java index d8fc00a2f1560..89d667de56c37 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/CacheInvalidatorRegistryTests.java @@ -59,6 +59,7 @@ public void testSecurityIndexStateChangeWillInvalidateAllRegisteredInvalidators( true, true, true, + true, Version.CURRENT, ".security", ClusterHealthStatus.GREEN, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index 1e0969c96c0de..c8f86957f84a3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -123,7 +123,8 @@ public void testIndexWithUpToDateMappingAndTemplate() { manager.clusterChanged(event(markShardsAvailable(clusterStateBuilder))); assertThat(manager.indexExists(), Matchers.equalTo(true)); - assertThat(manager.isAvailable(), Matchers.equalTo(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), Matchers.equalTo(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), Matchers.equalTo(true)); assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); } @@ -164,6 +165,96 @@ public void testIndexWithoutPrimaryShards() { assertIndexUpToDateButNotAvailable(); } + public void testIndexAvailability() { + assertInitialState(); + final ClusterState cs = createClusterState( + TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7, + SecuritySystemIndices.SECURITY_MAIN_ALIAS + ).build(); + Index index = cs.metadata().index(TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7).getIndex(); + ShardId shardId = new ShardId(index, 0); + ShardRouting primary = ShardRouting.newUnassigned( + shardId, + true, + RecoverySource.ExistingStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""), + ShardRouting.Role.INDEX_ONLY + ); + ShardRouting replica = ShardRouting.newUnassigned( + shardId, + false, + 
RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null), + ShardRouting.Role.SEARCH_ONLY + ); + String nodeId = ESTestCase.randomAlphaOfLength(8); + String nodeId2 = ESTestCase.randomAlphaOfLength(8); + + // primary/index unavailable, replica/search unavailable + IndexShardRoutingTable.Builder indexShardRoutingTableBuilder = IndexShardRoutingTable.builder(shardId) + .addShard( + primary.initialize(nodeId, null, primary.getExpectedShardSize()) + .moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "")) + ) + .addShard( + replica.initialize(nodeId2, null, replica.getExpectedShardSize()) + .moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "")) + ); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingTableBuilder); + RoutingTable routingTable = RoutingTable.builder().add(indexRoutingTableBuilder.build()).build(); + ClusterState.Builder clusterStateBuilder = ClusterState.builder(cs); + clusterStateBuilder.routingTable(routingTable); + ClusterState clusterState = clusterStateBuilder.build(); + manager.clusterChanged(event(clusterState)); + assertThat(manager.indexExists(), Matchers.equalTo(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), Matchers.equalTo(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), Matchers.equalTo(false)); + assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); + assertThat(manager.isStateRecovered(), Matchers.equalTo(true)); + + // primary/index available, replica/search available + indexShardRoutingTableBuilder = IndexShardRoutingTable.builder(shardId) + .addShard( + primary.initialize(nodeId, null, primary.getExpectedShardSize()).moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) + ) + .addShard( + replica.initialize(nodeId2, null, replica.getExpectedShardSize()) + .moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) // start replica + ); + indexRoutingTableBuilder = IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingTableBuilder); + routingTable = RoutingTable.builder().add(indexRoutingTableBuilder.build()).build(); + clusterStateBuilder = ClusterState.builder(cs); + clusterStateBuilder.routingTable(routingTable); + clusterState = clusterStateBuilder.build(); + manager.clusterChanged(event(clusterState)); + assertThat(manager.indexExists(), Matchers.equalTo(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), Matchers.equalTo(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), Matchers.equalTo(true)); + assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); + assertThat(manager.isStateRecovered(), Matchers.equalTo(true)); + + // primary/index available, replica/search unavailable + indexShardRoutingTableBuilder = IndexShardRoutingTable.builder(shardId) + .addShard( + primary.initialize(nodeId, null, primary.getExpectedShardSize()).moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) + ) + .addShard(replica.initialize(nodeId2, null, replica.getExpectedShardSize())); // initialized, but not started + indexRoutingTableBuilder = IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingTableBuilder); + routingTable = RoutingTable.builder().add(indexRoutingTableBuilder.build()).build(); + clusterStateBuilder = ClusterState.builder(cs); + clusterStateBuilder.routingTable(routingTable); +
clusterState = clusterStateBuilder.build(); + manager.clusterChanged(event(clusterState)); + assertThat(manager.indexExists(), Matchers.equalTo(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), Matchers.equalTo(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), Matchers.equalTo(true)); + assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); + assertThat(manager.isStateRecovered(), Matchers.equalTo(true)); + + // primary/index unavailable, replica/search available + // it is not currently possible to have unassigned primaries with assigned replicas + } + private ClusterChangedEvent event(ClusterState clusterState) { return new ClusterChangedEvent("test-event", clusterState, EMPTY_CLUSTER_STATE); } @@ -419,7 +510,8 @@ public void testProcessClosedIndexState() { ); manager.clusterChanged(event(markShardsAvailable(indexAvailable))); assertThat(manager.indexExists(), is(true)); - assertThat(manager.isAvailable(), is(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), is(true)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), is(true)); // Now close it ClusterState.Builder indexClosed = createClusterState( @@ -436,19 +528,22 @@ manager.clusterChanged(event(indexClosed.build())); assertThat(manager.indexExists(), is(true)); - assertThat(manager.isAvailable(), is(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), is(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), is(false)); } private void assertInitialState() { assertThat(manager.indexExists(), Matchers.equalTo(false)); - assertThat(manager.isAvailable(), Matchers.equalTo(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), Matchers.equalTo(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), Matchers.equalTo(false)); assertThat(manager.isMappingUpToDate(), Matchers.equalTo(false)); assertThat(manager.isStateRecovered(), Matchers.equalTo(false)); } private void assertIndexUpToDateButNotAvailable() { assertThat(manager.indexExists(), Matchers.equalTo(true)); - assertThat(manager.isAvailable(), Matchers.equalTo(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS), Matchers.equalTo(false)); + assertThat(manager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS), Matchers.equalTo(false)); assertThat(manager.isMappingUpToDate(), Matchers.equalTo(true)); assertThat(manager.isStateRecovered(), Matchers.equalTo(true)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java index 82b7b312465d3..a15d8409fe2b4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityMocks.java @@ -86,9 +86,10 @@ public static SecurityIndexManager mockSecurityIndexManager(String alias, boolea return null; }).when(securityIndexManager).checkIndexVersionThenExecute(anyConsumer(), any(Runnable.class)); when(securityIndexManager.indexExists()).thenReturn(exists); - when(securityIndexManager.isAvailable()).thenReturn(available); +
when(securityIndexManager.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(available); + when(securityIndexManager.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(available); when(securityIndexManager.aliasName()).thenReturn(alias); - when(securityIndexManager.freeze()).thenReturn(securityIndexManager); + when(securityIndexManager.defensiveCopy()).thenReturn(securityIndexManager); return securityIndexManager; } From 7864b923118128e56fd4825a4c1b4158a280365c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 5 Oct 2023 09:19:16 -0700 Subject: [PATCH 007/176] AwaitsFix #100341 (#100349) Relates #100341 --- .../org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java index 4bbcff44ec740..dc305f95325b2 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.action; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; @@ -30,6 +31,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @ESIntegTestCase.ClusterScope(scope = TEST, minNumDataNodes = 2, maxNumDataNodes = 4) +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100341") public class EsqlDisruptionIT extends EsqlActionIT { // copied from AbstractDisruptionTestCase From ad0a26687caf2299748a0b4362c330ac915a7b7e Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Thu, 5 Oct 2023 19:48:39 +0300 Subject: [PATCH 008/176] ESQL: Remove aliasing inside Eval (#100238) Evals that introduce aliases can be simplified by extracting them into a project (and thus signaling there's no underlying processing).
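Only the pure aliasing assignments move into the project; expressions that actually compute something stay in the eval, with alias references resolved back to their source expressions. As a minimal sketch (field names illustrative), an eval that only aliases, followed by a projection, collapses entirely: eval y = x | keep y becomes project x as y.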
The following eval: eval x = a + 1, y = x, z = y + 1, y = z, w = y + 1 can be converted into: eval x = a + 1, z = a + 1 + 1, w = a + 1 + 1 | project x, z, z as y, w Fix #100174 Fix #100050 --- docs/changelog/100238.yaml | 6 + .../xpack/esql/CsvTestUtils.java | 1 + .../src/main/resources/rename.csv-spec | 27 ++ .../esql/optimizer/LogicalPlanOptimizer.java | 90 ++++- .../xpack/esql/planner/Layout.java | 7 +- .../LocalPhysicalPlanOptimizerTests.java | 25 +- .../optimizer/LogicalPlanOptimizerTests.java | 337 ++++++++++++++++-- .../optimizer/PhysicalPlanOptimizerTests.java | 42 +-- .../xpack/ql/expression/Alias.java | 6 +- 9 files changed, 476 insertions(+), 65 deletions(-) create mode 100644 docs/changelog/100238.yaml diff --git a/docs/changelog/100238.yaml b/docs/changelog/100238.yaml new file mode 100644 index 0000000000000..70e3f5340e223 --- /dev/null +++ b/docs/changelog/100238.yaml @@ -0,0 +1,6 @@ +pr: 100238 +summary: "ESQL: Remove aliasing inside Eval" +area: ES|QL +type: bug +issues: + - 100174 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index 1655686ab4f20..953fb65bd1eec 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -342,6 +342,7 @@ public enum Type { LOOKUP.put("BYTE", INTEGER); // add also the types with short names + LOOKUP.put("BOOL", BOOLEAN); LOOKUP.put("I", INTEGER); LOOKUP.put("L", LONG); LOOKUP.put("UL", UNSIGNED_LONG); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec index 799f8821d97da..5e5c70e3cbba7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec @@ -93,6 +93,16 @@ x:integer | y:integer | x2:integer | y2:integer 4 | 4 | 5 | 6 ; +duplicateProjectEval +from employees | eval y = languages, x = languages | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3; + +x:integer | y:integer | x2:integer | y2:integer +2 | 2 | 3 | 4 +5 | 5 | 6 | 7 +4 | 4 | 5 | 6 +; + + renameWithFilterPushedToES from employees | sort emp_no | rename emp_no as x | keep languages, first_name, last_name, x | where x > 10030 and x < 10040 | limit 5; @@ -147,3 +157,20 @@ y:integer | x:date 10076 | 1985-07-09T00:00:00.000Z 10061 | 1985-09-17T00:00:00.000Z ; + +renameIntertwinedWithSort +FROM employees | eval x = salary | rename x as y | rename y as x | sort x | rename x as y | limit 10; + +avg_worked_seconds:l | birth_date:date | emp_no:i | first_name:s | gender:s | height:d | height.float:d | height.half_float:d | height.scaled_float:d| hire_date:date | is_rehired:bool | job_positions:s | languages:i | languages.byte:i | languages.long:l | languages.short:i | last_name:s | salary:i | salary_change:d | salary_change.int:i | salary_change.keyword:s | salary_change.long:l | still_hired:bool | y:i + +390266432 | 1959-08-19T00:00:00.000Z | 10015 | Guoxiang | null | 1.66 | 1.659999966621399 | 1.66015625 | 1.6600000000000001 | 1987-07-02T00:00:00.000Z | [false, false, false, true]| [Head Human Resources, Junior Developer, Principal Support Engineer, Support Engineer] | 5 | 5 | 5 | 5 | Nooteboom | 25324 | [12.4, 14.25] | [12, 14] | [12.40, 14.25] | [12, 14] | true | 25324 
+203838153 | 1953-02-08T00:00:00.000Z | 10035 | null | M | 1.81 | 1.809999942779541 | 1.8095703125 | 1.81 | 1988-09-05T00:00:00.000Z | false | [Data Scientist, Senior Python Developer] | 5 | 5 | 5 | 5 | Chappelet | 25945 | [-6.58, -2.54] | [-6, -2] | [-2.54, -6.58] | [-6, -2] | false | 25945 +313407352 | 1964-10-18T00:00:00.000Z | 10092 | Valdiodio | F | 1.75 | 1.75 | 1.75 | 1.75 | 1989-09-22T00:00:00.000Z | [false, false, true, true] | [Accountant, Junior Developer] | 1 | 1 | 1 | 1 | Niizuma | 25976 | [-6.77, 0.39, 8.3, 8.78] | [-6, 0, 8, 8] | [-6.77, 0.39, 8.30,8.78] | [-6, 0, 8, 8] | false | 25976 +248451647 | null | 10048 | Florian | M | 2.0 | 2.0 | 2.0 | 2.0 | 1985-02-24T00:00:00.000Z | [true, true] | Internship | 3 | 3 | 3 | 3 | Syrotiuk | 26436 | null | null | null | null | false | 26436 +324356269 | 1954-05-30T00:00:00.000Z | 10057 | Ebbe | F | 1.59 | 1.590000033378601 | 1.58984375 | 1.59 | 1992-01-15T00:00:00.000Z | null | [Head Human Resources, Python Developer] | 4 | 4 | 4 | 4 | Callaway | 27215 | [-6.73, -5.27, -2.43, 1.03] | [-6, -5, -2, 1] | [-2.43, -5.27, -6.73, 1.03]| [-6, -5, -2, 1] | true | 27215 +359067056 | 1960-05-25T00:00:00.000Z | 10084 | Tuval | M | 1.51 | 1.5099999904632568 | 1.509765625 | 1.51 | 1995-12-15T00:00:00.000Z | false | Principal Support Engineer | 1 | 1 | 1 | 1 | Kalloufi | 28035 | null | null | null | null | true | 28035 +359208133 | 1953-04-03T00:00:00.000Z | 10026 | Yongqiao | M | 2.1 | 2.0999999046325684 | 2.099609375 | 2.1 | 1995-03-20T00:00:00.000Z | [false, true] | Reporting Analyst | null | null | null | null | Berztiss | 28336 | [-7.37, 10.62, 11.20] | [-7, 10, 11] | [-7.37, 10.62, 11.20] | [-7, 10, 11] | true | 28336 +233999584 | 1962-11-26T00:00:00.000Z | 10068 | Charlene | M | 1.58 | 1.5800000429153442 | 1.580078125 | 1.58 | 1987-08-07T00:00:00.000Z | true | Architect | 3 | 3 | 3 | 3 | Brattka | 28941 | [-5.61, -5.29, 3.43] | [-5, -5, 3] | [-5.29, -5.61, 3.43] | [-5, -5, 3] | true | 28941 +341158890 | 1961-10-15T00:00:00.000Z | 10060 | Breannda | M | 1.42 | 1.4199999570846558 | 1.419921875 | 1.42 | 1987-11-02T00:00:00.000Z | [false, false, false, true]| [Business Analyst, Data Scientist, Senior Team Lead] | 2 | 2 | 2 | 2 | Billingsley | 29175 | [-1.76, -0.85] | [-1, 0] | [-0.85, -1.76] | [-1, 0] | true | 29175 +246355863 | null | 10042 | Magy | F | 1.44 | 1.440000057220459 | 1.4404296875 | 1.44 | 1993-03-21T00:00:00.000Z | null | [Architect, Business Analyst, Internship, Junior Developer] | 3 | 3 | 3 | 3 | Stamatiou | 30404 | [-9.28, 9.42] | [-9, 9] | [-9.28, 9.42] | [-9, 9] | true | 30404 +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 8d060a739e8f8..bd02eb9cd4b8e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -87,8 +87,13 @@ protected List> batches() { } protected static List> rules() { - var substitutions = new Batch<>("Substitutions", Limiter.ONCE, new SubstituteSurrogates(), new ReplaceRegexMatch() - // new ReplaceTextFieldAttributesWithTheKeywordSubfield() + var substitutions = new Batch<>( + "Substitutions", + Limiter.ONCE, + new SubstituteSurrogates(), + new ReplaceRegexMatch(), + new ReplaceAliasingEvalWithProject() + // new ReplaceTextFieldAttributesWithTheKeywordSubfield() ); var operators = 
new Batch<>(
@@ -851,4 +856,85 @@ protected Expression regexToEquals(RegexMatch regexMatch, Literal literal) {
             return new Equals(regexMatch.source(), regexMatch.field(), literal);
         }
     }
+
+    /**
+     * Replace aliasing evals (eval x=a) with a projection which can be further combined / simplified.
+     * The rule gets applied only if there's another project (Project/Stats) above it.
+     *
+     * Needs to take into account shadowing of potentially intermediate fields:
+     * eval x = a + 1, y = x, z = y + 1, y = z, w = y + 1
+     * The output should be
+     * eval x = a + 1, z = a + 1 + 1, w = a + 1 + 1 + 1
+     * project x, z, z as y, w
+     */
+    static class ReplaceAliasingEvalWithProject extends Rule<LogicalPlan, LogicalPlan> {
+
+        @Override
+        public LogicalPlan apply(LogicalPlan logicalPlan) {
+            Holder<Boolean> enabled = new Holder<>(false);
+
+            return logicalPlan.transformDown(p -> {
+                // found projection, turn enable flag on
+                if (p instanceof Aggregate || p instanceof Project) {
+                    enabled.set(true);
+                } else if (enabled.get() && p instanceof Eval eval) {
+                    p = rule(eval);
+                }
+
+                return p;
+            });
+        }
+
+        private LogicalPlan rule(Eval eval) {
+            LogicalPlan plan = eval;
+
+            // holds simple aliases such as b = a, c = b, d = c
+            AttributeMap<Expression> basicAliases = new AttributeMap<>();
+            // same as above but keeps the original expression
+            AttributeMap<NamedExpression> basicAliasSources = new AttributeMap<>();
+
+            List<Alias> keptFields = new ArrayList<>();
+
+            var fields = eval.fields();
+            for (int i = 0, size = fields.size(); i < size; i++) {
+                Alias field = fields.get(i);
+                Expression child = field.child();
+                var attribute = field.toAttribute();
+                // put the aliases in a separate map to separate the underlying resolve from other aliases
+                if (child instanceof Attribute) {
+                    basicAliases.put(attribute, child);
+                    basicAliasSources.put(attribute, field);
+                } else {
+                    // be lazy and start replacing name aliases only if needed
+                    if (basicAliases.size() > 0) {
+                        // update the child through the field
+                        field = (Alias) field.transformUp(e -> basicAliases.resolve(e, e));
+                    }
+                    keptFields.add(field);
+                }
+            }
+
+            // at least one alias encountered, move it into a project
+            if (basicAliases.size() > 0) {
+                // preserve the eval output (takes care of shadowing and order) but replace the basic aliases
+                List<NamedExpression> projections = new ArrayList<>(eval.output());
+                // replace the removed aliases with their initial definition - however use the output to preserve the shadowing
+                for (int i = projections.size() - 1; i >= 0; i--) {
+                    NamedExpression project = projections.get(i);
+                    projections.set(i, basicAliasSources.getOrDefault(project, project));
+                }
+
+                LogicalPlan child = eval.child();
+                if (keptFields.size() > 0) {
+                    // replace the eval with just the kept fields
+                    child = new Eval(eval.source(), eval.child(), keptFields);
+                }
+                // put the projection in place
+                plan = new Project(eval.source(), child, projections);
+            }
+
+            return plan;
+        }
+
+    }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java
index 8b91b9818a65a..871d3751b225d 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java
@@ -111,9 +111,10 @@ public Layout build() {
             for (NameId id : set.nameIds) {
                 ChannelAndType next = new ChannelAndType(channel, set.type);
                 ChannelAndType prev = layout.put(id, next);
-                if (prev != null) {
-                    throw new IllegalArgumentException("Name [" + id + "] is on two channels [" + prev + "] and [" + next + "]");
-                }
+                // Do allow multiple names to point to the same channel - see https://github.com/elastic/elasticsearch/pull/100238
+                // if (prev != null) {
+                //     throw new IllegalArgumentException("Name [" + id + "] is on two channels [" + prev + "] and [" + next + "]");
+                // }
             }
         }
         return new DefaultLayout(Collections.unmodifiableMap(layout), numberOfChannels);
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
index 3cab0604b0688..80fd51cacd163 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
@@ -28,7 +28,6 @@
 import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec;
 import org.elasticsearch.xpack.esql.plan.physical.EsStatsQueryExec.Stat;
 import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize;
-import org.elasticsearch.xpack.esql.plan.physical.EvalExec;
 import org.elasticsearch.xpack.esql.plan.physical.ExchangeExec;
 import org.elasticsearch.xpack.esql.plan.physical.LimitExec;
 import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
@@ -208,27 +207,31 @@ public void testCountFieldWithFilter() {
     }
 
     /**
-     * Expects - for now
+     * Expects
      * LimitExec[500[INTEGER]]
-     * \_AggregateExec[[],[COUNT(hidden_s{r}#8) AS c],FINAL,null]
+     * \_AggregateExec[[],[COUNT(salary{f}#20) AS c],FINAL,null]
      * \_ExchangeExec[[count{r}#25, seen{r}#26],true]
-     * \_AggregateExec[[],[COUNT(hidden_s{r}#8) AS c],PARTIAL,8]
-     * \_EvalExec[[salary{f}#20 AS s, s{r}#3 AS hidden_s]]
-     * \_FieldExtractExec[salary{f}#20]
-     * \_EsQueryExec[test], query[{"esql_single_value":{"field":"emp_no","next":{"range":{"emp_no":{"lt":10050,"boost":1.0}}}}}]
-     * [_doc{f}#42], limit[], sort[] estimatedRowSize[16]
+     * \_EsStatsQueryExec[test], stats[Stat[name=salary, type=COUNT, query={
+     *   "exists" : {
+     *     "field" : "salary",
+     *     "boost" : 1.0
+     *   }
      */
-    // TODO: the eval is not yet optimized away
     public void testCountFieldWithEval() {
         var plan = plan("""
            from test | eval s = salary | rename s as sr | eval hidden_s = sr | rename emp_no as e | where e < 10050 | stats c = count(hidden_s)
            """, IS_SV_STATS);
+
         var limit = as(plan, LimitExec.class);
         var agg = as(limit.child(), AggregateExec.class);
         var exg = as(agg.child(), ExchangeExec.class);
-        agg = as(exg.child(), AggregateExec.class);
-        var eval = as(agg.child(), EvalExec.class);
+        var esStatsQuery = as(exg.child(), EsStatsQueryExec.class);
+
+        assertThat(esStatsQuery.limit(), is(nullValue()));
+        assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen"));
+        var stat = as(esStatsQuery.stats().get(0), Stat.class);
+        assertThat(stat.query(), is(QueryBuilders.existsQuery("salary")));
     }
 
     // optimizer doesn't know yet how to push down count over field
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java
index 78cc971ceb61b..848a7bc12aeef 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java
@@ -26,6 +26,7 @@ import 
org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Min; import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; import org.elasticsearch.xpack.esql.expression.function.aggregate.Sum; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; @@ -55,6 +56,7 @@ import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Nullability; import org.elasticsearch.xpack.ql.expression.ReferenceAttribute; +import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; @@ -1250,14 +1252,13 @@ public void testInvalidFoldDueToReplacement() { | keep x """); - var project = as(plan, EsqlProject.class); + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("x")); + var child = aliased(project.projections().get(0), FieldAttribute.class); + assertThat(Expressions.name(child), is("emp_no")); var limit = as(project.child(), Limit.class); var filter = as(limit.child(), Filter.class); - var eval = as(filter.child(), Eval.class); - assertThat(eval.fields(), hasSize(1)); - var alias = as(eval.fields().get(0), Alias.class); - assertThat(Expressions.name(alias.child()), is("emp_no")); - var source = as(eval.child(), EsRelation.class); + var source = as(filter.child(), EsRelation.class); } public void testEnrich() { @@ -1509,6 +1510,12 @@ public void testPruneChainedEval() { var source = as(limit.child(), EsRelation.class); } + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[],[COUNT(salary{f}#1345) AS c]] + * \_EsRelation[test][_meta_field{f}#1346, emp_no{f}#1340, first_name{f}#..] + */ public void testPruneEvalDueToStats() { var plan = plan(""" from test @@ -1519,14 +1526,10 @@ public void testPruneEvalDueToStats() { var limit = as(plan, Limit.class); var aggregate = as(limit.child(), Aggregate.class); - assertThat(aggregate.aggregates(), hasSize(1)); - var alias = as(aggregate.aggregates().get(0), Alias.class); - var count = as(alias.child(), Count.class); - var eval = as(aggregate.child(), Eval.class); - assertThat(eval.fields(), hasSize(1)); - var field = as(eval.fields().get(0), Alias.class); - assertThat(field.name(), is("x")); - var source = as(eval.child(), EsRelation.class); + var aggs = aggregate.aggregates(); + assertThat(Expressions.names(aggs), contains("c")); + aggFieldName(aggs.get(0), Count.class, "salary"); + var source = as(aggregate.child(), EsRelation.class); } public void testPruneUnusedAggSimple() { @@ -1546,6 +1549,12 @@ public void testPruneUnusedAggSimple() { var source = as(agg.child(), EsRelation.class); } + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[],[COUNT(salary{f}#19) AS x]] + * \_EsRelation[test][_meta_field{f}#20, emp_no{f}#14, first_name{f}#15, ..] 
+ */ public void testPruneUnusedAggMixedWithEval() { var plan = plan(""" from test @@ -1554,15 +1563,13 @@ public void testPruneUnusedAggMixedWithEval() { | keep x """); - var project = as(plan, Project.class); - var eval = as(project.child(), Eval.class); - var limit = as(eval.child(), Limit.class); + var limit = as(plan, Limit.class); var agg = as(limit.child(), Aggregate.class); assertThat(agg.groupings(), hasSize(0)); - assertThat(agg.aggregates(), hasSize(1)); - var aggOne = as(agg.aggregates().get(0), Alias.class); - assertThat(aggOne.name(), is("c")); - var count = as(aggOne.child(), Count.class); + var aggs = agg.aggregates(); + assertThat(aggs, hasSize(1)); + assertThat(Expressions.names(aggs), contains("x")); + aggFieldName(agg.aggregates().get(0), Count.class, "salary"); var source = as(agg.child(), EsRelation.class); } @@ -1586,6 +1593,14 @@ public void testPruneUnusedAggsChainedAgg() { var source = as(agg.child(), EsRelation.class); } + /** + * Expects + * Project[[c{r}#342]] + * \_Limit[500[INTEGER]] + * \_Filter[min{r}#348 > 10[INTEGER]] + * \_Aggregate[[],[COUNT(salary{f}#367) AS c, MIN(salary{f}#367) AS min]] + * \_EsRelation[test][_meta_field{f}#368, emp_no{f}#362, first_name{f}#36..] + */ public void testPruneMixedAggInsideUnusedEval() { var plan = plan(""" from test @@ -1600,15 +1615,22 @@ public void testPruneMixedAggInsideUnusedEval() { var project = as(plan, Project.class); var limit = as(project.child(), Limit.class); var filter = as(limit.child(), Filter.class); - var eval = as(filter.child(), Eval.class); - var agg = as(eval.child(), Aggregate.class); + var agg = as(filter.child(), Aggregate.class); assertThat(agg.groupings(), hasSize(0)); var aggs = agg.aggregates(); - assertThat(aggs, hasSize(2)); assertThat(Expressions.names(aggs), contains("c", "min")); + aggFieldName(aggs.get(0), Count.class, "salary"); + aggFieldName(aggs.get(1), Min.class, "salary"); var source = as(agg.child(), EsRelation.class); } + /** + * Expects + * Eval[[max{r}#6 + min{r}#9 + c{r}#3 AS x, min{r}#9 AS y, c{r}#3 AS z]] + * \_Limit[500[INTEGER]] + * \_Aggregate[[],[COUNT(salary{f}#26) AS c, MAX(salary{f}#26) AS max, MIN(salary{f}#26) AS min]] + * \_EsRelation[test][_meta_field{f}#27, emp_no{f}#21, first_name{f}#22, ..] + */ public void testNoPruningWhenDealingJustWithEvals() { var plan = plan(""" from test @@ -1623,6 +1645,13 @@ public void testNoPruningWhenDealingJustWithEvals() { var agg = as(limit.child(), Aggregate.class); } + /** + * Expects + * Project[[y{r}#6 AS z]] + * \_Eval[[emp_no{f}#11 + 1[INTEGER] AS y]] + * \_Limit[500[INTEGER]] + * \_EsRelation[test][_meta_field{f}#17, emp_no{f}#11, first_name{f}#12, ..] + */ public void testNoPruningWhenChainedEvals() { var plan = plan(""" from test @@ -1631,12 +1660,19 @@ public void testNoPruningWhenChainedEvals() { """); var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("z")); var eval = as(project.child(), Eval.class); - assertThat(Expressions.names(eval.fields()), contains("x", "y", "z")); + assertThat(Expressions.names(eval.fields()), contains("y")); var limit = as(eval.child(), Limit.class); var source = as(limit.child(), EsRelation.class); } + /** + * Expects + * Project[[salary{f}#20 AS x, emp_no{f}#15 AS y]] + * \_Limit[500[INTEGER]] + * \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..] 
+ */ public void testPruningDuplicateEvals() { var plan = plan(""" from test @@ -1647,12 +1683,257 @@ public void testPruningDuplicateEvals() { """); var project = as(plan, Project.class); - var eval = as(project.child(), Eval.class); - assertThat(Expressions.names(eval.fields()), contains("x", "y")); - var limit = as(eval.child(), Limit.class); + var projections = project.projections(); + assertThat(Expressions.names(projections), contains("x", "y")); + var child = aliased(projections.get(0), FieldAttribute.class); + assertThat(child.name(), is("salary")); + child = aliased(projections.get(1), FieldAttribute.class); + assertThat(child.name(), is("emp_no")); + + var limit = as(project.child(), Limit.class); var source = as(limit.child(), EsRelation.class); } + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[],[COUNT(salary{f}#24) AS cx, COUNT(emp_no{f}#19) AS cy]] + * \_EsRelation[test][_meta_field{f}#25, emp_no{f}#19, first_name{f}#20, ..] + */ + public void testPruneEvalAliasOnAggUngrouped() { + var plan = plan(""" + from test + | eval x = emp_no, x = salary + | eval y = salary + | eval y = emp_no + | stats cx = count(x), cy = count(y) + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("cx", "cy")); + aggFieldName(aggs.get(0), Count.class, "salary"); + aggFieldName(aggs.get(1), Count.class, "emp_no"); + var source = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[x{r}#6],[COUNT(emp_no{f}#17) AS cy, salary{f}#22 AS x]] + * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] + */ + public void testPruneEvalAliasOnAggGroupedByAlias() { + var plan = plan(""" + from test + | eval x = emp_no, x = salary + | eval y = salary + | eval y = emp_no + | stats cy = count(y) by x + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("cy", "x")); + aggFieldName(aggs.get(0), Count.class, "emp_no"); + var x = aliased(aggs.get(1), FieldAttribute.class); + assertThat(x.name(), is("salary")); + var source = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[gender{f}#22],[COUNT(emp_no{f}#20) AS cy, MIN(salary{f}#25) AS cx, gender{f}#22]] + * \_EsRelation[test][_meta_field{f}#26, emp_no{f}#20, first_name{f}#21, ..] + */ + public void testPruneEvalAliasOnAggGrouped() { + var plan = plan(""" + from test + | eval x = emp_no, x = salary + | eval y = salary + | eval y = emp_no + | stats cy = count(y), cx = min(x) by gender + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("cy", "cx", "gender")); + aggFieldName(aggs.get(0), Count.class, "emp_no"); + aggFieldName(aggs.get(1), Min.class, "salary"); + var by = as(aggs.get(2), FieldAttribute.class); + assertThat(Expressions.name(by), is("gender")); + var source = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[gender{f}#21],[COUNT(emp_no{f}#19) AS cy, MIN(salary{f}#24) AS cx, gender{f}#21]] + * \_EsRelation[test][_meta_field{f}#25, emp_no{f}#19, first_name{f}#20, ..] 
+ */ + public void testPruneEvalAliasMixedWithRenameOnAggGrouped() { + var plan = plan(""" + from test + | eval x = emp_no, x = salary + | rename salary as x + | eval y = emp_no + | stats cy = count(y), cx = min(x) by gender + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("cy", "cx", "gender")); + aggFieldName(aggs.get(0), Count.class, "emp_no"); + aggFieldName(aggs.get(1), Min.class, "salary"); + var by = as(aggs.get(2), FieldAttribute.class); + assertThat(Expressions.name(by), is("gender")); + var source = as(agg.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[gender{f}#19],[COUNT(x{r}#3) AS cy, MIN(x{r}#3) AS cx, gender{f}#19]] + * \_Eval[[emp_no{f}#17 + 1[INTEGER] AS x]] + * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] + */ + public void testEvalAliasingAcrossCommands() { + var plan = plan(""" + from test + | eval x = emp_no + 1 + | eval y = x + | eval z = y + 1 + | stats cy = count(y), cx = min(x) by gender + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("cy", "cx", "gender")); + aggFieldName(aggs.get(0), Count.class, "x"); + aggFieldName(aggs.get(1), Min.class, "x"); + var by = as(aggs.get(2), FieldAttribute.class); + assertThat(Expressions.name(by), is("gender")); + var eval = as(agg.child(), Eval.class); + assertThat(Expressions.names(eval.fields()), contains("x")); + var source = as(eval.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[gender{f}#19],[COUNT(x{r}#3) AS cy, MIN(x{r}#3) AS cx, gender{f}#19]] + * \_Eval[[emp_no{f}#17 + 1[INTEGER] AS x]] + * \_EsRelation[test][_meta_field{f}#23, emp_no{f}#17, first_name{f}#18, ..] + */ + public void testEvalAliasingInsideSameCommand() { + var plan = plan(""" + from test + | eval x = emp_no + 1, y = x, z = y + 1 + | stats cy = count(y), cx = min(x) by gender + """); + + var limit = as(plan, Limit.class); + var agg = as(limit.child(), Aggregate.class); + var aggs = agg.aggregates(); + assertThat(Expressions.names(aggs), contains("cy", "cx", "gender")); + aggFieldName(aggs.get(0), Count.class, "x"); + aggFieldName(aggs.get(1), Min.class, "x"); + var by = as(aggs.get(2), FieldAttribute.class); + assertThat(Expressions.name(by), is("gender")); + var eval = as(agg.child(), Eval.class); + assertThat(Expressions.names(eval.fields()), contains("x")); + var source = as(eval.child(), EsRelation.class); + } + + /** + * Expects + * Limit[500[INTEGER]] + * \_Aggregate[[gender{f}#22],[COUNT(z{r}#9) AS cy, MIN(x{r}#3) AS cx, gender{f}#22]] + * \_Eval[[emp_no{f}#20 + 1[INTEGER] AS x, x{r}#3 + 1[INTEGER] AS z]] + * \_EsRelation[test][_meta_field{f}#26, emp_no{f}#20, first_name{f}#21, ..] 
+     */
+    public void testEvalAliasingInsideSameCommandWithShadowing() {
+        var plan = plan("""
+            from test
+            | eval x = emp_no + 1, y = x, z = y + 1, y = z
+            | stats cy = count(y), cx = min(x) by gender
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        var aggs = agg.aggregates();
+        assertThat(Expressions.names(aggs), contains("cy", "cx", "gender"));
+        aggFieldName(aggs.get(0), Count.class, "z");
+        aggFieldName(aggs.get(1), Min.class, "x");
+        var by = as(aggs.get(2), FieldAttribute.class);
+        assertThat(Expressions.name(by), is("gender"));
+        var eval = as(agg.child(), Eval.class);
+        assertThat(Expressions.names(eval.fields()), contains("x", "z"));
+        var source = as(eval.child(), EsRelation.class);
+    }
+
+    public void testPruneRenameOnAgg() {
+        var plan = plan("""
+            from test
+            | rename emp_no as x
+            | rename salary as y
+            | stats cy = count(y), cx = min(x) by gender
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        var aggs = agg.aggregates();
+        assertThat(Expressions.names(aggs), contains("cy", "cx", "gender"));
+        aggFieldName(aggs.get(0), Count.class, "salary");
+        aggFieldName(aggs.get(1), Min.class, "emp_no");
+
+        var source = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[gender{f}#14],[COUNT(salary{f}#17) AS cy, MIN(emp_no{f}#12) AS cx, gender{f}#14]]
+     * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..]
+     */
+    public void testPruneRenameOnAggBy() {
+        var plan = plan("""
+            from test
+            | rename emp_no as x
+            | rename salary as y, gender as g
+            | stats cy = count(y), cx = min(x) by g
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        var aggs = agg.aggregates();
+        assertThat(Expressions.names(aggs), contains("cy", "cx", "g"));
+        aggFieldName(aggs.get(0), Count.class, "salary");
+        aggFieldName(aggs.get(1), Min.class, "emp_no");
+        var groupby = aliased(aggs.get(2), FieldAttribute.class);
+        assertThat(Expressions.name(groupby), is("gender"));
+
+        var source = as(agg.child(), EsRelation.class);
+    }
+
+    private <T> T aliased(Expression exp, Class<T> clazz) {
+        var alias = as(exp, Alias.class);
+        return as(alias.child(), clazz);
+    }
+
+    private <T extends AggregateFunction> void aggFieldName(Expression exp, Class<T> aggType, String fieldName) {
+        var alias = as(exp, Alias.class);
+        var af = as(alias.child(), aggType);
+        var field = af.field();
+        assertThat(Expressions.name(field), is(fieldName));
+    }
+
     private LogicalPlan optimizedPlan(String query) {
         return plan(query);
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java
index 9f3bef6d064e6..2b25035e4097d 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java
@@ -103,7 +103,7 @@
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
 
-//@TestLogging(value = "org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer:TRACE", reason = "debug")
+//@TestLogging(value = "org.elasticsearch.xpack.esql.optimizer:TRACE", reason = "debug")
 public class PhysicalPlanOptimizerTests extends ESTestCase {
 
     private static final String PARAM_FORMATTING = "%1$s";
 
@@ -244,6 +244,16 @@ public void 
testExactlyOneExtractorPerFieldWithPruning() { assertThat(query.estimatedRowSize(), equalTo(allFieldRowSize + Integer.BYTES * 2)); } + /** + * Expects + * LimitExec[500[INTEGER]] + * \_AggregateExec[[],[SUM(salary{f}#882) AS x],FINAL,null] + * \_ExchangeExec[[sum{r}#887, seen{r}#888],true] + * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ + * Aggregate[[],[SUM(salary{f}#882) AS x]] + * \_Filter[ROUND(emp_no{f}#877) > 10[INTEGER]] + * \_EsRelation[test][_meta_field{f}#883, emp_no{f}#877, first_name{f}#87..]]] + */ public void testDoubleExtractorPerFieldEvenWithAliasNoPruningDueToImplicitProjection() { var plan = physicalPlan(""" from test @@ -261,9 +271,7 @@ public void testDoubleExtractorPerFieldEvenWithAliasNoPruningDueToImplicitProjec aggregate = as(exchange.child(), AggregateExec.class); assertThat(aggregate.estimatedRowSize(), equalTo(Long.BYTES)); - var eval = as(aggregate.child(), EvalExec.class); - - var extract = as(eval.child(), FieldExtractExec.class); + var extract = as(aggregate.child(), FieldExtractExec.class); assertThat(names(extract.attributesToExtract()), contains("salary")); var filter = as(extract.child(), FilterExec.class); @@ -271,7 +279,7 @@ public void testDoubleExtractorPerFieldEvenWithAliasNoPruningDueToImplicitProjec assertThat(names(extract.attributesToExtract()), contains("emp_no")); var query = source(extract.child()); - assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES * 4 /* for doc id, emp_no, salary, and c */)); + assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES * 3 /* for doc id, emp_no and salary*/)); } public void testTripleExtractorPerField() { @@ -903,16 +911,12 @@ public void testQueryWithLimitSort() throws Exception { /** * Expected - * - * ProjectExec[[emp_no{f}#7, x{r}#4]] - * \_TopNExec[[Order[emp_no{f}#7,ASC,LAST]],5[INTEGER]] - * \_ExchangeExec[] - * \_ProjectExec[[emp_no{f}#7, x{r}#4]] - * \_TopNExec[[Order[emp_no{f}#7,ASC,LAST]],5[INTEGER]] - * \_FieldExtractExec[emp_no{f}#7] - * \_EvalExec[[first_name{f}#8 AS x]] - * \_FieldExtractExec[first_name{f}#8] - * \_EsQueryExec[test], query[][_doc{f}#14], limit[] + * ProjectExec[[emp_no{f}#7, first_name{f}#8 AS x]] + * \_TopNExec[[Order[emp_no{f}#7,ASC,LAST]],5[INTEGER],0] + * \_ExchangeExec[[],false] + * \_ProjectExec[[emp_no{f}#7, first_name{f}#8]] + * \_FieldExtractExec[emp_no{f}#7, first_name{f}#8] + * \_EsQueryExec[test], query[][_doc{f}#28], limit[5], sort[[FieldSort[field=emp_no{f}#7, direction=ASC, nulls=LAST]]]... 
*/ public void testLocalProjectIncludeLocalAlias() throws Exception { var optimized = optimizedPlan(physicalPlan(""" @@ -928,11 +932,9 @@ public void testLocalProjectIncludeLocalAlias() throws Exception { var exchange = asRemoteExchange(topN.child()); project = as(exchange.child(), ProjectExec.class); - assertThat(names(project.projections()), contains("emp_no", "x")); - topN = as(project.child(), TopNExec.class); - var extract = as(topN.child(), FieldExtractExec.class); - var eval = as(extract.child(), EvalExec.class); - extract = as(eval.child(), FieldExtractExec.class); + assertThat(names(project.projections()), contains("emp_no", "first_name")); + var extract = as(project.child(), FieldExtractExec.class); + var source = as(extract.child(), EsQueryExec.class); } /** diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Alias.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Alias.java index 7f22cece89b00..df9f3c1d20eec 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Alias.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Alias.java @@ -56,8 +56,12 @@ protected NodeInfo info() { return NodeInfo.create(this, Alias::new, name(), qualifier, child, id(), synthetic()); } + public Alias replaceChild(Expression child) { + return new Alias(source(), name(), qualifier, child, id(), synthetic()); + } + @Override - public Expression replaceChildren(List newChildren) { + public Alias replaceChildren(List newChildren) { return new Alias(source(), name(), qualifier, newChildren.get(0), id(), synthetic()); } From acca114b8ca8fcdaf8560285b11d8be6396d00df Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 5 Oct 2023 13:29:05 -0400 Subject: [PATCH 009/176] ESQL: Temporarily disable huge concat tests (#100352) We're working on these and we have a plan! --- .../elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java index 23638ef9384cc..84c654f8946fb 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java @@ -167,6 +167,7 @@ public void testSmallConcat() throws IOException { assertMap(map, matchesMap().entry("columns", columns).entry("values", values)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") public void testHugeConcat() throws IOException { initSingleDocIndex(); assertCircuitBreaks(() -> concat(10)); From 110dd5ed16b33575b4ac7324d9aa09cf01036948 Mon Sep 17 00:00:00 2001 From: Stuart Tettemer Date: Thu, 5 Oct 2023 12:58:18 -0500 Subject: [PATCH 010/176] Tracing: Use doPriv when working with spans, use SpanId (#100232) `SpanId` is used when explicitly closing the trace in `executeQueryPhase` to avoid double closing the associated task. `doPrivileged` avoids hitting `java.lang.UnsupportedOperationException: Cannot define class using reflection: access denied ("java.lang.reflect.ReflectPermission" "suppressAccessChecks")` when classes are sometimes injected while switching spans. 
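As a minimal sketch of the pattern (the `AccessController`/`PrivilegedAction` and OpenTelemetry
`Context`/`Scope` APIs are real; the `spans` map and `Releasable` wiring are assumed from
`APMTracer` rather than copied from the diff below):

    // Hypothetical condensed form of APMTracer#withScope after this change.
    public Releasable withScope(SpanId spanId) {
        final io.opentelemetry.context.Context context = spans.get(spanId);
        if (context == null) {
            return () -> {};
        }
        // Run the context switch with the plugin's own privileges so the
        // security manager permits OTel's reflective class definition.
        var scope = java.security.AccessController.doPrivileged(
            (java.security.PrivilegedAction<io.opentelemetry.context.Scope>) context::makeCurrent
        );
        return scope::close;
    }

The same `doPrivileged` wrapping applies to ending spans in `stopTrace`.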
Removed `default Releasable withScope(Task task)` from the Tracer API because it automatically created a span id and, in one of the three uses, that SpanId was necessary to close the span. Fixes: #100072 --- docs/changelog/100232.yaml | 5 +++++ .../telemetry/apm/internal/tracing/APMTracer.java | 12 +++++++++--- .../java/org/elasticsearch/search/SearchService.java | 10 ++++++---- .../org/elasticsearch/telemetry/tracing/Tracer.java | 12 ------------ 4 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 docs/changelog/100232.yaml diff --git a/docs/changelog/100232.yaml b/docs/changelog/100232.yaml new file mode 100644 index 0000000000000..3f8336b6c241c --- /dev/null +++ b/docs/changelog/100232.yaml @@ -0,0 +1,5 @@ +pr: 100232 +summary: "Tracing: Use `doPriv` when working with spans, use `SpanId`" +area: Infra/Core +type: bug +issues: [] diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java index 866f819609515..49fdc44681aa3 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java @@ -283,7 +283,7 @@ private Context getParentContext(ThreadContext threadContext) { public Releasable withScope(SpanId spanId) { final Context context = spans.get(spanId); if (context != null) { - var scope = context.makeCurrent(); + var scope = AccessController.doPrivileged((PrivilegedAction) context::makeCurrent); return scope::close; } return () -> {}; @@ -381,7 +381,10 @@ public void stopTrace(SpanId spanId) { final var span = Span.fromContextOrNull(spans.remove(spanId)); if (span != null) { logger.trace("Finishing trace [{}]", spanId); - span.end(); + AccessController.doPrivileged((PrivilegedAction) () -> { + span.end(); + return null; + }); } } @@ -390,7 +393,10 @@ public void stopTrace(SpanId spanId) { */ @Override public void stopTrace() { - Span.current().end(); + AccessController.doPrivileged((PrivilegedAction) () -> { + Span.current().end(); + return null; + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 0de6cb133bca3..88487f528096c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -114,6 +114,7 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.telemetry.tracing.SpanId; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.Scheduler.Cancellable; @@ -492,7 +493,7 @@ public void executeDfsPhase(ShardSearchRequest request, SearchShardTask task, Ac private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchShardTask task) throws IOException { ReaderContext readerContext = createOrGetReaderContext(request); try (@SuppressWarnings("unused") // withScope call is necessary to instrument search execution - Releasable scope = tracer.withScope(task); + Releasable scope = tracer.withScope(SpanId.forTask(task)); Releasable ignored = readerContext.markAsUsed(getKeepAlive(request)); SearchContext context = createContext(readerContext, request, task, 
ResultsType.DFS, false) ) { @@ -658,8 +659,9 @@ private static void runAsync(Executor executor, CheckedSupplier {}; } - - @Override - public Releasable withScope(Task task) { - return () -> {}; - } }; interface AttributeKeys { From 7ff9c9f18f18b61d8b20186f04ae6ea3a9de253e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 5 Oct 2023 14:03:00 -0400 Subject: [PATCH 011/176] ESQL: Silence failing test (#100357) It's not releasing blocks. Relates to #100356 --- .../qa/testFixtures/src/main/resources/rename.csv-spec | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec index 5e5c70e3cbba7..44cf92254298b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec @@ -84,7 +84,8 @@ x:integer | z:integer 4 | 8 ; -renameProjectEval +# AwaitsFix https://github.com/elastic/elasticsearch/issues/100356 +renameProjectEval-Ignore from employees | sort emp_no | eval y = languages | rename languages as x | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3; x:integer | y:integer | x2:integer | y2:integer @@ -93,7 +94,8 @@ x:integer | y:integer | x2:integer | y2:integer 4 | 4 | 5 | 6 ; -duplicateProjectEval +# AwaitsFix https://github.com/elastic/elasticsearch/issues/100356 +duplicateProjectEval-Ignore from employees | eval y = languages, x = languages | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3; x:integer | y:integer | x2:integer | y2:integer @@ -158,7 +160,8 @@ y:integer | x:date 10061 | 1985-09-17T00:00:00.000Z ; -renameIntertwinedWithSort +# AwaitsFix https://github.com/elastic/elasticsearch/issues/100356 +renameIntertwinedWithSort-Ignore FROM employees | eval x = salary | rename x as y | rename y as x | sort x | rename x as y | limit 10; avg_worked_seconds:l | birth_date:date | emp_no:i | first_name:s | gender:s | height:d | height.float:d | height.half_float:d | height.scaled_float:d| hire_date:date | is_rehired:bool | job_positions:s | languages:i | languages.byte:i | languages.long:l | languages.short:i | last_name:s | salary:i | salary_change:d | salary_change.int:i | salary_change.keyword:s | salary_change.long:l | still_hired:bool | y:i From 0ef4da2b8480c3b1fd3a8fc48c290fb49de1f194 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Thu, 5 Oct 2023 12:03:35 -0700 Subject: [PATCH 012/176] Temporarily disable failing test Relates #100365 --- .../java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index fd4fe13b9c1b1..46d85746c3990 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.action; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Build; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -66,6 +67,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; 
+@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100365") public class EsqlActionIT extends AbstractEsqlIntegTestCase { long epoch = System.currentTimeMillis(); From c1a0e85da5300a33f9ee79423989f1838ad889ed Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 5 Oct 2023 16:41:46 -0700 Subject: [PATCH 013/176] Fix assertion in BlockFactory (#100373) --- .../main/java/org/elasticsearch/compute/data/BlockFactory.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java index 0e93bc1ee5e90..ad5dfbf298200 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java @@ -214,7 +214,7 @@ public IntBlock newConstantIntBlockWith(int value, int positions, long preAdjust public IntVector newConstantIntVector(int value, int positions) { adjustBreaker(ConstantIntVector.RAM_BYTES_USED, false); var v = new ConstantIntVector(value, positions, this); - assert v.ramBytesUsed() == ConstantLongVector.RAM_BYTES_USED; + assert v.ramBytesUsed() == ConstantIntVector.RAM_BYTES_USED; return v; } From 88a9139142950533730d6b28c731e07ce93e9ba6 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 5 Oct 2023 21:13:57 -0700 Subject: [PATCH 014/176] Release success blocks when reading from stream (#100376) If we encounter a failure while reading blocks from an InputStream, we should release the already-read blocks. Found this while running IT tests with CrankyCircuitBreakerService. --- .../java/org/elasticsearch/compute/data/Page.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java index 18f3ed7ba61bf..0265013eb2029 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java @@ -97,8 +97,16 @@ public Page(StreamInput in) throws IOException { int positionCount = in.readVInt(); int blockPositions = in.readVInt(); Block[] blocks = new Block[blockPositions]; - for (int blockIndex = 0; blockIndex < blockPositions; blockIndex++) { - blocks[blockIndex] = in.readNamedWriteable(Block.class); + boolean success = false; + try { + for (int blockIndex = 0; blockIndex < blockPositions; blockIndex++) { + blocks[blockIndex] = in.readNamedWriteable(Block.class); + } + success = true; + } finally { + if (success == false) { + Releasables.closeExpectNoException(blocks); + } } this.positionCount = positionCount; this.blocks = blocks; From e0f86191bb82beea8f1f390d270e5f3650690dcc Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 5 Oct 2023 21:14:56 -0700 Subject: [PATCH 015/176] Release output pages when ESQL request fails (#100378) We should release output pages collected by the ComputeService when an ESQL request fails or is canceled. 
Closes #100341 --- .../org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java | 2 -- .../org/elasticsearch/xpack/esql/plugin/ComputeService.java | 4 ++++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java index dc305f95325b2..4bbcff44ec740 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.action; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; @@ -31,7 +30,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @ESIntegTestCase.ClusterScope(scope = TEST, minNumDataNodes = 2, maxNumDataNodes = 4) -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100341") public class EsqlDisruptionIT extends EsqlActionIT { // copied from AbstractDisruptionTestCase diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 3ea8ffe242919..e10469a4ff97d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -127,6 +127,10 @@ public void execute( configuration ); final List collectedPages = Collections.synchronizedList(new ArrayList<>()); + listener = listener.delegateResponse((l, e) -> { + collectedPages.forEach(p -> Releasables.closeExpectNoException(p::releaseBlocks)); + l.onFailure(e); + }); PhysicalPlan coordinatorPlan = new OutputExec(coordinatorAndDataNodePlan.v1(), collectedPages::add); PhysicalPlan dataNodePlan = coordinatorAndDataNodePlan.v2(); From 8e25d6b59875111508840f0b552a13b23cf38be3 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Fri, 6 Oct 2023 10:09:24 +0200 Subject: [PATCH 016/176] Mute testCancelViaAsyncSearchDelete (#100384) With this commit we mute `CrossClusterAsyncSearchIT#testCancelViaAsyncSearchDelete`. 
Relates #99519 --- .../elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index daed401e6e956..f079828ccb5bf 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -1246,6 +1246,7 @@ public void testCancelViaTasksAPI() throws Exception { assertThat(json, matchesRegex(".*task (was)?\s*cancelled.*")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99519") public void testCancelViaAsyncSearchDelete() throws Exception { Map testClusterInfo = setupTwoClusters(); String localIndex = (String) testClusterInfo.get("local.index"); From 002d9410a894f8dc7c32755e74d375f5fa4ae871 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Fri, 6 Oct 2023 10:14:48 +0200 Subject: [PATCH 017/176] Mute testTsdbDataStreams (#100385) Relates #100271 --- .../org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index 0df010c1c9bf7..2d9d4163102b8 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -301,6 +301,7 @@ public void testRollupIndexInTheHotPhaseAfterRollover() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100271") public void testTsdbDataStreams() throws Exception { // Create the ILM policy DateHistogramInterval fixedInterval = ConfigTestHelpers.randomInterval(); From fd3762855b2eb4a0e904ae1116e843b8d4b1a99c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Fri, 6 Oct 2023 11:30:53 +0200 Subject: [PATCH 018/176] Making classname optional in Transport protocol (#99702) Making classname optional in Transport protocol since it is not needed with Stable plugins. 
--- docs/changelog/99702.yaml | 6 ++++ .../org/elasticsearch/TransportVersions.java | 2 +- .../plugins/PluginDescriptor.java | 32 +++++++++++++++++-- .../nodesinfo/NodeInfoStreamingTests.java | 16 ++++++---- .../plugins/PluginDescriptorTests.java | 28 +++++++++++----- .../plugins/PluginsServiceTests.java | 20 ++++++++++-- .../plugins/PluginsUtilsTests.java | 2 +- .../test/SecuritySingleNodeTestCase.java | 1 + .../test/SecurityIntegTestCase.java | 1 + 9 files changed, 87 insertions(+), 21 deletions(-) create mode 100644 docs/changelog/99702.yaml diff --git a/docs/changelog/99702.yaml b/docs/changelog/99702.yaml new file mode 100644 index 0000000000000..657ff34e045a8 --- /dev/null +++ b/docs/changelog/99702.yaml @@ -0,0 +1,6 @@ +pr: 99702 +summary: Making classname optional in Transport protocol +area: Infra/Plugins +type: bug +issues: + - 98584 diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index c49ff1b1f0d29..e851434ac2cb7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -134,7 +134,7 @@ static TransportVersion def(int id) { public static final TransportVersion NODE_INFO_REQUEST_SIMPLIFIED = def(8_510_00_0); public static final TransportVersion NESTED_KNN_VECTOR_QUERY_V = def(8_511_00_0); public static final TransportVersion ML_PACKAGE_LOADER_PLATFORM_ADDED = def(8_512_00_0); - + public static final TransportVersion PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME = def(8_513_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java index 46028ad36b66c..6d6089bf592f6 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -78,6 +78,7 @@ public class PluginDescriptor implements Writeable, ToXContentObject { * @param hasNativeController whether or not the plugin has a native controller * @param isLicensed whether is this a licensed plugin * @param isModular whether this plugin should be loaded in a module layer + * @param isStable whether this plugin is implemented using the stable plugin API */ public PluginDescriptor( String name, @@ -105,6 +106,8 @@ public PluginDescriptor( this.isLicensed = isLicensed; this.isModular = isModular; this.isStable = isStable; + + ensureCorrectArgumentsForPluginType(); } /** @@ -119,7 +122,11 @@ public PluginDescriptor(final StreamInput in) throws IOException { this.version = in.readString(); elasticsearchVersion = Version.readVersion(in); javaVersion = in.readString(); - this.classname = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME)) { + this.classname = in.readOptionalString(); + } else { + this.classname = in.readString(); + } if (in.getTransportVersion().onOrAfter(MODULE_NAME_SUPPORT)) { this.moduleName = in.readOptionalString(); } else { @@ -145,6 +152,8 @@ public PluginDescriptor(final StreamInput in) throws IOException { isModular = moduleName != null; isStable = false; } + + ensureCorrectArgumentsForPluginType(); } @Override @@ -154,7 +163,11 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(version); Version.writeVersion(elasticsearchVersion, 
out); out.writeString(javaVersion); - out.writeString(classname); + if (out.getTransportVersion().onOrAfter(TransportVersions.PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME)) { + out.writeOptionalString(classname); + } else { + out.writeString(classname); + } if (out.getTransportVersion().onOrAfter(MODULE_NAME_SUPPORT)) { out.writeOptionalString(moduleName); } @@ -174,6 +187,18 @@ public void writeTo(final StreamOutput out) throws IOException { } } + private void ensureCorrectArgumentsForPluginType() { + if (classname == null && isStable == false) { + throw new IllegalArgumentException("Classname must be provided for classic plugins"); + } + if (classname != null && isStable) { + throw new IllegalArgumentException("Classname is not needed for stable plugins"); + } + if (moduleName != null && isStable) { + throw new IllegalArgumentException("ModuleName is not needed for stable plugins"); + } + } + /** * Reads the descriptor file for a plugin. * @@ -329,6 +354,9 @@ public String getDescription() { * @return the entry point to the plugin */ public String getClassname() { + if (isStable) { + throw new IllegalStateException("Stable plugins do not have an explicit entry point"); + } return classname; } diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index c92c9c5305777..a0accacc65eee 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -156,6 +156,8 @@ private static NodeInfo createNodeInfo() { int numPlugins = randomIntBetween(0, 5); List plugins = new ArrayList<>(); for (int i = 0; i < numPlugins; i++) { + var isStable = randomBoolean(); + var hasModuleName = randomBoolean(); plugins.add( new PluginDescriptor( randomAlphaOfLengthBetween(3, 10), @@ -163,19 +165,21 @@ private static NodeInfo createNodeInfo() { randomAlphaOfLengthBetween(3, 10), VersionUtils.randomVersion(random()), "1.8", - randomAlphaOfLengthBetween(3, 10), - randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10), + isStable ? null : randomAlphaOfLengthBetween(3, 10), + isStable || hasModuleName == false ? null : randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean() + isStable ) ); } int numModules = randomIntBetween(0, 5); List modules = new ArrayList<>(); for (int i = 0; i < numModules; i++) { + var isStable = randomBoolean(); + var hasModuleName = randomBoolean(); modules.add( new PluginDescriptor( randomAlphaOfLengthBetween(3, 10), @@ -183,13 +187,13 @@ private static NodeInfo createNodeInfo() { randomAlphaOfLengthBetween(3, 10), VersionUtils.randomVersion(random()), "1.8", - randomAlphaOfLengthBetween(3, 10), - randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10), + isStable ? null : randomAlphaOfLengthBetween(3, 10), + isStable || hasModuleName == false ? 
null : randomAlphaOfLengthBetween(3, 10), Collections.emptyList(), randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean() + isStable ) ); } diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java index 10d5b3b9355d6..5e0e82cb18e20 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java @@ -235,7 +235,7 @@ public void testSerialize() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean() + false ); BytesStreamOutput output = new BytesStreamOutput(); info.writeTo(output); @@ -258,7 +258,7 @@ public void testSerializeWithModuleName() throws Exception { randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean() + false ); BytesStreamOutput output = new BytesStreamOutput(); info.writeTo(output); @@ -268,6 +268,16 @@ public void testSerializeWithModuleName() throws Exception { assertThat(info2.toString(), equalTo(info.toString())); } + public void testSerializeStablePluginDescriptor() throws Exception { + PluginDescriptor info = mockStableDescriptor(); + BytesStreamOutput output = new BytesStreamOutput(); + info.writeTo(output); + ByteBuffer buffer = ByteBuffer.wrap(output.bytes().toBytesRef().bytes); + ByteBufferStreamInput input = new ByteBufferStreamInput(buffer); + PluginDescriptor info2 = new PluginDescriptor(input); + assertThat(info2.toString(), equalTo(info.toString())); + } + PluginDescriptor newMockDescriptor(String name) { return new PluginDescriptor( name, @@ -281,7 +291,7 @@ PluginDescriptor newMockDescriptor(String name) { randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean() + false ); } @@ -311,19 +321,21 @@ public void testUnknownProperties() throws Exception { * use the hashcode to catch duplicate names */ public void testPluginEqualityAndHash() { + var isStable = randomBoolean(); + var classname = isStable ? null : "dummyclass"; PluginDescriptor descriptor1 = new PluginDescriptor( "c", "foo", "dummy", Version.CURRENT, "1.8", - "dummyclass", + classname, null, Collections.singletonList("foo"), randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean() + isStable ); // everything but name is different from descriptor1 PluginDescriptor descriptor2 = new PluginDescriptor( @@ -332,8 +344,8 @@ public void testPluginEqualityAndHash() { randomValueOtherThan(descriptor1.getVersion(), () -> randomAlphaOfLengthBetween(4, 12)), descriptor1.getElasticsearchVersion().previousMajor(), randomValueOtherThan(descriptor1.getJavaVersion(), () -> randomAlphaOfLengthBetween(4, 12)), - randomValueOtherThan(descriptor1.getClassname(), () -> randomAlphaOfLengthBetween(4, 12)), - randomAlphaOfLength(6), + descriptor1.isStable() ? randomAlphaOfLengthBetween(4, 12) : null, + descriptor1.isStable() ? 
randomAlphaOfLength(6) : null, Collections.singletonList( randomValueOtherThanMany(v -> descriptor1.getExtendedPlugins().contains(v), () -> randomAlphaOfLengthBetween(4, 12)) ), @@ -349,7 +361,7 @@ public void testPluginEqualityAndHash() { descriptor1.getVersion(), descriptor1.getElasticsearchVersion(), descriptor1.getJavaVersion(), - descriptor1.getClassname(), + classname, descriptor1.getModuleName().orElse(null), descriptor1.getExtendedPlugins(), descriptor1.hasNativeController(), diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 72aba521f1b79..eddc029dded6d 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -460,10 +460,11 @@ public void testPluginFromParentClassLoader() throws IOException { public void testExtensiblePlugin() { TestExtensiblePlugin extensiblePlugin = new TestExtensiblePlugin(); + var classname = "FakePlugin"; PluginsService.loadExtensions( List.of( new PluginsService.LoadedPlugin( - new PluginDescriptor("extensible", null, null, null, null, null, null, List.of(), false, false, false, false), + new PluginDescriptor("extensible", null, null, null, null, classname, null, List.of(), false, false, false, false), extensiblePlugin ) ) @@ -477,11 +478,24 @@ public void testExtensiblePlugin() { PluginsService.loadExtensions( List.of( new PluginsService.LoadedPlugin( - new PluginDescriptor("extensible", null, null, null, null, null, null, List.of(), false, false, false, false), + new PluginDescriptor("extensible", null, null, null, null, classname, null, List.of(), false, false, false, false), extensiblePlugin ), new PluginsService.LoadedPlugin( - new PluginDescriptor("test", null, null, null, null, null, null, List.of("extensible"), false, false, false, false), + new PluginDescriptor( + "test", + null, + null, + null, + null, + classname, + null, + List.of("extensible"), + false, + false, + false, + false + ), testPlugin ) ) diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java index 755f8dcf482b8..05ba122c2aaf0 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java @@ -436,7 +436,7 @@ private static PluginDescriptor getPluginDescriptorForVersion(Version id, String "1.0", id, javaVersion, - "FakePlugin", + isStable ? 
null : "FakePlugin", null, Collections.emptyList(), false, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java index 1776b3bfd3c36..77ae4ab838585 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java @@ -119,6 +119,7 @@ private void doAssertXPackIsInstalled() { Collection pluginNames = nodeInfo.getInfo(PluginsAndModules.class) .getPluginInfos() .stream() + .filter(p -> p.descriptor().isStable() == false) .map(p -> p.descriptor().getClassname()) .collect(Collectors.toList()); assertThat( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 6057667fb575e..4e6ea34d3dc9e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -187,6 +187,7 @@ protected void doAssertXPackIsInstalled() { Collection pluginNames = nodeInfo.getInfo(PluginsAndModules.class) .getPluginInfos() .stream() + .filter(p -> p.descriptor().isStable() == false) .map(p -> p.descriptor().getClassname()) .collect(Collectors.toList()); assertThat( From 350720e972c61fbc5fd8b606358d1427cb33ef25 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 6 Oct 2023 02:46:54 -0700 Subject: [PATCH 019/176] Fix system property value for inference rescorer tests feature flag (#100366) Co-authored-by: Elastic Machine --- .../main/java/org/elasticsearch/test/cluster/FeatureFlag.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index 38a91be1b8c3c..122989eaec65a 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -16,7 +16,7 @@ */ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), - INFERENCE_RESCORER("es.inference_rescorer_feature_flag_enabled", Version.fromString("8.10.0"), null); + INFERENCE_RESCORER("es.inference_rescorer_feature_flag_enabled=true", Version.fromString("8.10.0"), null); public final String systemProperty; public final Version from; From 7012584a197a9ef9421a71466f283544803b3201 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 6 Oct 2023 12:32:43 +0100 Subject: [PATCH 020/176] [ML] Fix empty requests being sent to nodes with the model allocations (#100388) Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request then empty requests were sent. 
---
 docs/changelog/100388.yaml                    |  7 +++++++
 .../assignment/TrainedModelAssignment.java    |  9 +++++++--
 .../results/ErrorInferenceResults.java        |  2 --
 .../TrainedModelAssignmentTests.java          | 11 ++++++++++
 .../TransportInternalInferModelAction.java    | 20 ++++++++++---------
 .../upgrades/MLModelDeploymentsUpgradeIT.java |  1 -
 6 files changed, 36 insertions(+), 14 deletions(-)
 create mode 100644 docs/changelog/100388.yaml

diff --git a/docs/changelog/100388.yaml b/docs/changelog/100388.yaml
new file mode 100644
index 0000000000000..0ff5228ef36d1
--- /dev/null
+++ b/docs/changelog/100388.yaml
@@ -0,0 +1,7 @@
+pr: 100388
+summary: Fix for inference requests being sent to every node with a model allocation.
+If there are more nodes than items in the original request then empty requests were sent.
+area: Machine Learning
+type: bug
+issues:
+ - 100180
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
index 3664e4f620266..96ac120356283 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
@@ -213,7 +213,9 @@ public List<Tuple<String, Integer>> selectRandomStartedNodesWeighedOnAllocations
         var nodeCounts = new ArrayList<Tuple<String, Integer>>();
         for (int i = 0; i < counts.length; i++) {
-            nodeCounts.add(new Tuple<>(nodeIds.get(i), counts[i]));
+            if (counts[i] > 0) {
+                nodeCounts.add(new Tuple<>(nodeIds.get(i), counts[i]));
+            }
         }
         return nodeCounts;
     }
@@ -232,7 +234,10 @@ public List<Tuple<String, Integer>> selectRandomStartedNodesWeighedOnAllocations
         var nodeCounts = new ArrayList<Tuple<String, Integer>>();
         for (int i = 0; i < counts.length; i++) {
-            nodeCounts.add(new Tuple<>(nodeIds.get(i), counts[i]));
+            // filter out zero counts
+            if (counts[i] > 0) {
+                nodeCounts.add(new Tuple<>(nodeIds.get(i), counts[i]));
+            }
         }
         return nodeCounts;
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResults.java
index c3b3a8f7d88f2..7785f8785a21e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResults.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.inference.InferenceResults;
-import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentBuilder;

 import java.io.IOException;
@@ -22,7 +21,6 @@ public class ErrorInferenceResults implements InferenceResults {

     public static final String NAME = "error";
-    public static final ParseField WARNING = new ParseField("error");

     private final Exception exception;
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java
index c85729b5a6311..ca777be21b3be 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java
@@ -182,6 +182,17 @@ public void testselectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenSin
         assertThat(nodes.get(0), equalTo(new Tuple<>("node-1", 1)));
     }

+    public void testSingleRequestWith2Nodes() {
+        TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5));
+        builder.addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, ""));
+        builder.addRoutingEntry("node-2", new RoutingInfo(1, 1, RoutingState.STARTED, ""));
+        TrainedModelAssignment assignment = builder.build();
+
+        var nodes = assignment.selectRandomStartedNodesWeighedOnAllocationsForNRequests(1);
+        assertThat(nodes, hasSize(1));
+        assertEquals(nodes.get(0).v2(), Integer.valueOf(1));
+    }
+
     public void testSelectRandomStartedNodeWeighedOnAllocationsForNRequests_GivenMultipleStartedNodes() {
         TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(6));
         builder.addRoutingEntry("node-1", new RoutingInfo(1, 1, RoutingState.STARTED, ""));
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
index bc25afba066be..2827967c42cd5 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
@@ -354,17 +354,19 @@ private void sendResponse() {
             } else {
                 for (int i = 0; i < results.length(); i++) {
                     var resultList = results.get(i);
-                    if (resultList != null) {
-                        for (var result : resultList) {
-                            if (result instanceof ErrorInferenceResults errorResult) {
-                                // Any failure fails all requests
-                                // TODO is this the correct behaviour for batched requests?
-                                finalListener.onFailure(errorResult.getException());
-                                return;
-                            }
+                    if (resultList == null) {
+                        continue;
+                    }
+
+                    for (var result : resultList) {
+                        if (result instanceof ErrorInferenceResults errorResult) {
+                            // Any failure fails all requests
+                            // TODO is this the correct behaviour for batched requests?
+                            finalListener.onFailure(errorResult.getException());
+                            return;
                         }
-                        responseBuilder.addInferenceResults(resultList);
                     }
+                    responseBuilder.addInferenceResults(resultList);
                 }
                 finalListener.onResponse(responseBuilder.build());
             }
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java
index 4912bff3518f0..b9fbf0b6b1f03 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MLModelDeploymentsUpgradeIT.java
@@ -97,7 +97,6 @@ public void removeLogging() throws IOException {
         client().performRequest(request);
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100180")
     public void testTrainedModelDeployment() throws Exception {
         assumeTrue("NLP model deployments added in 8.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_0_0));

From 24353a9e7ce8c3aac71a1391e3c4f2214b071450 Mon Sep 17 00:00:00 2001
From: Panagiotis Bailis
Date: Fri, 6 Oct 2023 14:54:59 +0300
Subject: [PATCH 021/176] Fixing exists query REST tests for sparse_vector_fields (#100030)

Removing (incorrectly) expected exceptions for `exists` queries on
`sparse_vectors` in < 8.0.0 versions.

Closes https://github.com/elastic/elasticsearch/issues/100003
---
 .../test/search.vectors/90_sparse_vector.yml  | 38 +++++++++++++------
 .../vectors/SparseVectorFieldMapper.java      |  8 +++-
 2 files changed, 33 insertions(+), 13 deletions(-)

diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml
index 27aa0e6e9a20b..7b8ce0b961b93 100644
--- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml
@@ -1,5 +1,5 @@
 ---
-"Indexing and searching sparse vectors":
+"Indexing and searching sparse vectors in >=8.11":

   - skip:
       version: " - 8.10.99"
@@ -77,16 +77,24 @@
         index: test
        id: "3"
       body:
-          text: "doing nothing will result in nothing"
+          text: "empty array with no nested values - should not be retrieved in exists queries"
          ml:
-            tokens: {}
+            tokens: [ ]

+  - do:
+      index:
+        index: test
+        id: "4"
+        body:
+          text: "should still respond to exists queries when empty"
+          ml:
+            tokens: { }

   - match: { result: "created" }

   - do:
       index:
         index: test
-        id: "4"
+        id: "5"
        body:
          text: "other embeddings available only"
          embeddings:
@@ -144,9 +152,9 @@
 ---
 "Sparse vector in 7.x":
   - skip:
-      features: allowed_warnings
-      version: "all"
-      reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/100003"
+      features: ["allowed_warnings"]
+      version: "8.0.0 - "
+      reason: "sparse_vector field type supported in 7.x"
   - do:
       allowed_warnings:
         - "The [sparse_vector] field type is deprecated and will be removed in 8.0."
@@ -164,17 +172,16 @@
   - match: { acknowledged: true }

   - do:
-      catch: /\[sparse_vector\] fields do not support \[exists\] queries/
+      allowed_warnings:
+        - "[sparse_vector] field type in old 7.x indices is allowed to contain [sparse_vector] fields, but they cannot be indexed or searched."
       search:
-        rest_total_hits_as_int: true
        index: test
        body:
          query:
            exists:
              field: ml.tokens
-
 ---
-"Sparse vector in 8.x":
+"Sparse vector in 8.0.0 <= x < 8.11.0":
   - skip:
       version: " - 7.99.99, 8.11.0 - "
       reason: "sparse_vector field type not supported in 8.x until 8.11.0"
@@ -189,3 +196,12 @@
             type: text
           ml.tokens:
             type: sparse_vector
+  - do:
+      catch: /\[sparse_vector\] fields do not support \[exists\] queries|no such index.*/
+      search:
+        rest_total_hits_as_int: true
+        index: test
+        body:
+          query:
+            exists:
+              field: ml.tokens
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java
index eded63e12e758..ee24ad0c4721b 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.index.mapper.vectors;

 import org.apache.lucene.document.FeatureField;
+import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.logging.DeprecationCategory;
@@ -110,8 +111,11 @@ public Query termQuery(Object value, SearchExecutionContext context) {

     @Override
     public Query existsQuery(SearchExecutionContext context) {
-        // No support for exists queries prior to this version
-        if (context.getIndexSettings().getIndexVersionCreated().before(SPARSE_VECTOR_IN_FIELD_NAMES_INDEX_VERSION)) {
+        if (context.getIndexSettings().getIndexVersionCreated().before(PREVIOUS_SPARSE_VECTOR_INDEX_VERSION)) {
+            deprecationLogger.warn(DeprecationCategory.MAPPINGS, "sparse_vector", ERROR_MESSAGE_7X);
+            return new MatchNoDocsQuery();
+        } else if (context.getIndexSettings().getIndexVersionCreated().before(SPARSE_VECTOR_IN_FIELD_NAMES_INDEX_VERSION)) {
+            // No support for exists queries prior to this version on 8.x
             throw new IllegalArgumentException("[sparse_vector] fields do not support [exists] queries");
         }
         return super.existsQuery(context);

From 4d634c1ef881dfe0bec0caf08842b215e7bae08f Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Fri, 6 Oct 2023 12:56:13 +0100
Subject: [PATCH 022/176] Fix changelog validation failure (#100396)

Validation failure introduced in #100388
---
 docs/changelog/100388.yaml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/changelog/100388.yaml b/docs/changelog/100388.yaml
index 0ff5228ef36d1..4b596b6ea23b6 100644
--- a/docs/changelog/100388.yaml
+++ b/docs/changelog/100388.yaml
@@ -1,6 +1,5 @@
 pr: 100388
-summary: Fix for inference requests being sent to every node with a model allocation.
-If there are more nodes than items in the original request then empty requests were sent.
+summary: Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request then empty requests were sent.
 area: Machine Learning
 type: bug
 issues:

From 2d08e85f6d71f76d3a47eb17c128b61888efd826 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Fri, 6 Oct 2023 12:58:21 +0100
Subject: [PATCH 023/176] Fix nonforking fast path in InboundHandler (#100389)

Fixes an equality check missed in #98854.
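A minimal sketch of the non-forking fast path in question (illustrative only, assuming a handler registration that exposes an Executor after #98854 rather than a thread-pool name): the fast path must compare executor instances, because a name-based equality check no longer matches anything once getExecutor() stops returning a thread-pool name.

    import java.util.concurrent.Executor;

    class FastPathSketch {
        // Stand-in for EsExecutors.DIRECT_EXECUTOR_SERVICE: runs tasks on the calling thread.
        static final Executor DIRECT_EXECUTOR = Runnable::run;

        static void dispatch(Executor executor, Runnable handleRequest) {
            if (executor == DIRECT_EXECUTOR) {
                handleRequest.run();             // non-forking fast path: handle inline
            } else {
                executor.execute(handleRequest); // fork onto the handler's executor
            }
        }
    }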
---
 .../main/java/org/elasticsearch/transport/InboundHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java
index 171f693905b61..5d47c79abfd61 100644
--- a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java
+++ b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java
@@ -268,7 +268,7 @@ private void handleRequest(TcpChannel channel, Inbo
             assert requestId > 0;
             request.setRequestId(requestId);
             verifyRequestReadFully(stream, requestId, action);
-            if (ThreadPool.Names.SAME.equals(reg.getExecutor())) {
+            if (reg.getExecutor() == EsExecutors.DIRECT_EXECUTOR_SERVICE) {
                 try (var ignored = threadPool.getThreadContext().newTraceContext()) {
                     doHandleRequest(reg, request, transportChannel);
                 }

From 7cffacbe8f831f57648745421d679635c75ca919 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Fri, 6 Oct 2023 08:17:17 -0400
Subject: [PATCH 024/176] ESQL: Tests for loading many fields (#100363)

Turns out we weren't turning on the tracking `BigArrays`. That helps,
but for this test to fully pass we'd have to use the `BlockFactory` to
build the loaded fields.

Co-authored-by: Alexander Spies
---
 .../esql/qa/single_node/HeapAttackIT.java     | 123 ++++++++++++++----
 .../xpack/esql/plugin/EsqlPlugin.java         |   2 +-
 2 files changed, 98 insertions(+), 27 deletions(-)

diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java
index 84c654f8946fb..e6dc165a75509 100644
--- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java
+++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java
@@ -13,10 +13,13 @@
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.test.ListMatcher;
 import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase;
 import org.junit.After;
@@ -91,14 +94,7 @@ private Response sortByManyLongs(int count) throws IOException {
             query.append(", i").append(i);
         }
         query.append("\\n| KEEP a, b | LIMIT 10000\"}");
-        Request request = new Request("POST", "/_query");
-        request.setJsonEntity(query.toString());
-        request.addParameter("error_trace", "");
-        request.setOptions(
-            RequestOptions.DEFAULT.toBuilder()
-                .setRequestConfig(RequestConfig.custom().setSocketTimeout(Math.toIntExact(TimeValue.timeValueMinutes(5).millis())).build())
-        );
-        return client().performRequest(request);
+        return query(query.toString(), null);
     }

     /**
@@ -139,14 +135,7 @@ private Response groupOnManyLongs(int count) throws IOException {
             query.append(", i").append(i);
         }
         query.append("\\n| STATS MAX(a)\"}");
-        Request request = new Request("POST", "/_query");
-        request.setJsonEntity(query.toString());
-        request.addParameter("error_trace", "");
-        request.setOptions(
-            RequestOptions.DEFAULT.toBuilder()
-                .setRequestConfig(RequestConfig.custom().setSocketTimeout(Math.toIntExact(TimeValue.timeValueMinutes(5).millis())).build())
-        );
-        return client().performRequest(request);
+        return query(query.toString(), null);
    }

    private StringBuilder makeManyLongs(int count) {
@@ -182,10 +171,7 @@ private Response concat(int evals) throws IOException {
                .append(")");
        }
        query.append("\"}");
-        Request request = new Request("POST", "/_query");
-        request.addParameter("error_trace", "");
-        request.setJsonEntity(query.toString().replace("\n", "\\n"));
-        return client().performRequest(request);
+        return query(query.toString(), null);
    }

    /**
@@ -240,10 +226,7 @@ private Response manyConcat(int strings) throws IOException {
             query.append("str").append(s);
         }
         query.append("\"}");
-        Request request = new Request("POST", "/_query");
-        request.addParameter("error_trace", "");
-        request.setJsonEntity(query.toString().replace("\n", "\\n"));
-        return client().performRequest(request);
+        return query(query.toString(), null);
    }

    public void testManyEval() throws IOException {
@@ -280,12 +263,47 @@ private Response manyEval(int evalLines) throws IOException {
            }
        }
        query.append("\n| LIMIT 10000\"}");
+        return query(query.toString(), null);
+    }
+
+    private Response query(String query, String filterPath) throws IOException {
         Request request = new Request("POST", "/_query");
         request.addParameter("error_trace", "");
+        if (filterPath != null) {
+            request.addParameter("filter_path", filterPath);
+        }
         request.setJsonEntity(query.toString().replace("\n", "\\n"));
+        request.setOptions(
+            RequestOptions.DEFAULT.toBuilder()
+                .setRequestConfig(RequestConfig.custom().setSocketTimeout(Math.toIntExact(TimeValue.timeValueMinutes(5).millis())).build())
+        );
         return client().performRequest(request);
     }

+    public void testFetchManyBigFields() throws IOException {
+        initManyBigFieldsIndex(100);
+        fetchManyBigFields(100);
+    }
+
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826")
+    public void testFetchTooManyBigFields() throws IOException {
+        initManyBigFieldsIndex(500);
+        assertCircuitBreaks(() -> fetchManyBigFields(500));
+    }
+
+    /**
+     * Fetches documents containing 1000 fields which are {@code 1kb} each.
+     */
+    private void fetchManyBigFields(int docs) throws IOException {
+        Response response = query("{\"query\": \"FROM manybigfields | SORT f000 | LIMIT " + docs + "\"}", "columns");
+        Map<?, ?> map = XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false);
+        ListMatcher columns = matchesList();
+        for (int f = 0; f < 1000; f++) {
+            columns = columns.item(matchesMap().entry("name", "f" + String.format(Locale.ROOT, "%03d", f)).entry("type", "keyword"));
+        }
+        assertMap(map, matchesMap().entry("columns", columns));
+    }
+
     private void initManyLongs() throws IOException {
         logger.info("loading many documents with longs");
         StringBuilder bulk = new StringBuilder();
@@ -314,13 +332,66 @@ private void initSingleDocIndex() throws IOException {
             """);
     }

-    private void initIndex(String name, String bulk) throws IOException {
+    private void initManyBigFieldsIndex(int docs) throws IOException {
+        logger.info("loading many documents with many big fields");
+        int docsPerBulk = 5;
+        int fields = 1000;
+        int fieldSize = Math.toIntExact(ByteSizeValue.ofKb(1).getBytes());
+
+        Request request = new Request("PUT", "/manybigfields");
+        XContentBuilder config = JsonXContent.contentBuilder().startObject();
+        config.startObject("settings").field("index.mapping.total_fields.limit", 10000).endObject();
+        config.startObject("mappings").startObject("properties");
+        for (int f = 0; f < fields; f++) {
+            config.startObject("f" + String.format(Locale.ROOT, "%03d", f)).field("type", "keyword").endObject();
+        }
+        config.endObject().endObject();
+        request.setJsonEntity(Strings.toString(config.endObject()));
+        Response response = client().performRequest(request);
+        assertThat(
+            EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8),
+            equalTo("{\"acknowledged\":true,\"shards_acknowledged\":true,\"index\":\"manybigfields\"}")
+        );
+
+        StringBuilder bulk = new StringBuilder();
+        for (int d = 0; d < docs; d++) {
+            bulk.append("{\"create\":{}}\n");
+            for (int f = 0; f < fields; f++) {
+                if (f == 0) {
+                    bulk.append('{');
+                } else {
+                    bulk.append(", ");
+                }
+                bulk.append('"').append("f").append(String.format(Locale.ROOT, "%03d", f)).append("\": \"");
+                bulk.append(Integer.toString(f % 10).repeat(fieldSize));
+                bulk.append('"');
+            }
+            bulk.append("}\n");
+            if (d % docsPerBulk == docsPerBulk - 1 && d != docs - 1) {
+                bulk("manybigfields", bulk.toString());
+                bulk.setLength(0);
+            }
+        }
+        initIndex("manybigfields", bulk.toString());
+    }
+
+    private void bulk(String name, String bulk) throws IOException {
         Request request = new Request("POST", "/" + name + "/_bulk");
-        request.addParameter("refresh", "true");
         request.addParameter("filter_path", "errors");
         request.setJsonEntity(bulk.toString());
         Response response = client().performRequest(request);
         assertThat(EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8), equalTo("{\"errors\":false}"));
+    }
+
+    private void initIndex(String name, String bulk) throws IOException {
+        bulk(name, bulk);
+
+        Request request = new Request("POST", "/" + name + "/_refresh");
+        Response response = client().performRequest(request);
+        assertThat(
+            EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8),
+            equalTo("{\"_shards\":{\"total\":2,\"successful\":1,\"failed\":0}}")
+        );

         request = new Request("POST", "/" + name + "/_forcemerge");
         request.addParameter("max_num_segments", "1");
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java
index 2608d4525b153..25802894e2832 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java
@@ -105,7 +105,7 @@ public Collection<Object> createComponents(
     ) {
         CircuitBreaker circuitBreaker = indicesService.getBigArrays().breakerService().getBreaker("request");
         Objects.requireNonNull(circuitBreaker, "request circuit breaker wasn't set");
-        BlockFactory blockFactory = new BlockFactory(circuitBreaker, indicesService.getBigArrays());
+        BlockFactory blockFactory = new BlockFactory(circuitBreaker, indicesService.getBigArrays().withCircuitBreaking());
         return List.of(
             new PlanExecutor(new IndexResolver(client, clusterService.getClusterName().value(), EsqlDataTypeRegistry.INSTANCE, Set::of)),
             new ExchangeService(clusterService.getSettings(), threadPool, EsqlPlugin.ESQL_THREAD_POOL_NAME, blockFactory),

From f2dfbfe8c45140ac79e039f40186311625d6c086 Mon Sep 17 00:00:00 2001
From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com>
Date: Fri, 6 Oct 2023 14:25:27 +0200
Subject: [PATCH 025/176] [DOCS] Add sparse-vector field type to docs, changed references (#100348)

---
 docs/reference/mapping/types.asciidoc         |   3 +
 .../mapping/types/sparse-vector.asciidoc      |  36 ++++
 .../query-dsl/text-expansion-query.asciidoc   |  54 +++---
 .../semantic-search-elser.asciidoc            | 170 +++++++++---------
 .../semantic-search/field-mappings.asciidoc   |  54 +++---
 5 files changed, 178 insertions(+), 139 deletions(-)
 create mode 100644 docs/reference/mapping/types/sparse-vector.asciidoc

diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc
index 7108d536f8715..fff736712529a 100644
--- a/docs/reference/mapping/types.asciidoc
+++ b/docs/reference/mapping/types.asciidoc
@@ -83,6 +83,7 @@ as-you-type completion.
 ==== Document ranking types

 <>:: Records dense vectors of float values.
+<>:: Records sparse vectors of float values.
 <>:: Records a numeric feature to boost hits at
 query time.
 <>:: Records numeric features to boost hits at
@@ -179,6 +180,8 @@ include::types/search-as-you-type.asciidoc[]

 include::types/shape.asciidoc[]

+include::types/sparse-vector.asciidoc[]
+
 include::types/text.asciidoc[]

 include::types/token-count.asciidoc[]
diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc
new file mode 100644
index 0000000000000..17a193eef1d4d
--- /dev/null
+++ b/docs/reference/mapping/types/sparse-vector.asciidoc
@@ -0,0 +1,36 @@
+[[sparse-vector]]
+=== Sparse vector field type
+++++
+Sparse vector
+++++
+
+A `sparse_vector` field can index features and weights so that they can later be used to query
+documents in queries with a <> query.
+
+`sparse_vector` is the field type that should be used with <>.
+
+[source,console]
+--------------------------------------------------
+PUT my-index
+{
+  "mappings": {
+    "properties": {
+      "text.tokens": {
+        "type": "sparse_vector"
+      }
+    }
+  }
+}
+--------------------------------------------------
+
+See <> for a complete example on adding documents
+ to a `sparse_vector` mapped field using ELSER.
+
+NOTE: `sparse_vector` fields only support single-valued fields and strictly positive
+values. Multi-valued fields and negative values will be rejected.
+
+NOTE: `sparse_vector` fields do not support querying, sorting or aggregating. They may
+only be used within <> queries.
+
+NOTE: `sparse_vector` fields only preserve 9 significant bits for the precision, which
+translates to a relative error of about 0.4%.
diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc
index 74ee80ba821a1..d15fd40846529 100644
--- a/docs/reference/query-dsl/text-expansion-query.asciidoc
+++ b/docs/reference/query-dsl/text-expansion-query.asciidoc
@@ -4,9 +4,9 @@
 Text expansion
 ++++

-The text expansion query uses a {nlp} model to convert the query text into a
-list of token-weight pairs which are then used in a query against a
-<>.
+The text expansion query uses a {nlp} model to convert the query text into a
+list of token-weight pairs which are then used in a query against a
+<> or <> field.

 [discrete]
 [[text-expansion-query-ex-request]]
 === Example request

 [source,console]
 ----
 GET _search
 {
    "query":{
       "text_expansion":{
-         "":{
+         "":{
             "model_id":"the model to produce the token weights",
             "model_text":"the query string"
          }
       }
    }
 }
 ----

 [discrete]
 [[text-expansion-query-params]]
 === Top level parameters for `text_expansion`

-``:::
+``:::
 (Required, object)
-The name of the field that contains the token-weight pairs the NLP model created
+The name of the field that contains the token-weight pairs the NLP model created
 based on the input text.

 [discrete]
 [[text-expansion-rank-feature-field-params]]
-=== Top level parameters for ``
+=== Top level parameters for ``

 `model_id`::::
 (Required, string)
-The ID of the model to use to convert the query text into token-weight pairs. It
-must be the same model ID that was used to create the tokens from the input
+The ID of the model to use to convert the query text into token-weight pairs. It
+must be the same model ID that was used to create the tokens from the input
 text.

 `model_text`::::
 (Required, string)
-The query text you want to use for search.
+The query text you want to use for search.

 [discrete]
 [[text-expansion-query-example]]
 === Example

-The following is an example of the `text_expansion` query that references the
-ELSER model to perform semantic search. For a more detailed description of how
-to perform semantic search by using ELSER and the `text_expansion` query, refer
+The following is an example of the `text_expansion` query that references the
+ELSER model to perform semantic search. For a more detailed description of how
+to perform semantic search by using ELSER and the `text_expansion` query, refer
 to <>.

 [source,console]
 ----
 GET my-index/_search
 {
    "query":{
       "text_expansion":{
          "ml.tokens":{
             "model_id":".elser_model_2",
             "model_text":"How is the weather in Jamaica?"
          }
       }
    }
 }
 ----

 [discrete]
 [[optimizing-text-expansion]]
 === Optimizing the search performance of the text_expansion query

-https://www.elastic.co/blog/faster-retrieval-of-top-hits-in-elasticsearch-with-block-max-wand[Max WAND]
-is an optimization technique used by {es} to skip documents that cannot score
-competitively against the current best matching documents. However, the tokens
-generated by the ELSER model don't work well with the Max WAND optimization.
-Consequently, enabling Max WAND can actually increase query latency for
-`text_expansion`. For datasets of a significant size, disabling Max
+https://www.elastic.co/blog/faster-retrieval-of-top-hits-in-elasticsearch-with-block-max-wand[Max WAND]
+is an optimization technique used by {es} to skip documents that cannot score
+competitively against the current best matching documents. However, the tokens
+generated by the ELSER model don't work well with the Max WAND optimization.
+Consequently, enabling Max WAND can actually increase query latency for
+`text_expansion`. For datasets of a significant size, disabling Max
 WAND leads to lower query latencies. Max WAND is controlled by the
-<> query parameter. Setting track_total_hits
-to true forces {es} to consider all documents, resulting in lower query
-latencies for the `text_expansion` query. However, other {es} queries run slower
+<> query parameter. Setting track_total_hits
+to true forces {es} to consider all documents, resulting in lower query
+latencies for the `text_expansion` query. However, other {es} queries run slower
 when Max WAND is disabled.

-If you are combining the `text_expansion` query with standard text queries in a
-compound search, it is recommended to measure the query performance before
+If you are combining the `text_expansion` query with standard text queries in a
+compound search, it is recommended to measure the query performance before
 deciding which setting to use.

-NOTE: The `track_total_hits` option applies to all queries in the search request
-and may be optimal for some queries but not for others. Take into account the
-characteristics of all your queries to determine the most suitable
+NOTE: The `track_total_hits` option applies to all queries in the search request
+and may be optimal for some queries but not for others. Take into account the
+characteristics of all your queries to determine the most suitable
 configuration.
diff --git a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc
index 082bb2ae2e020..0b4956dbe86ad 100644
--- a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc
+++ b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc
@@ -4,18 +4,18 @@
 Semantic search with ELSER
 ++++

-Elastic Learned Sparse EncodeR - or ELSER - is an NLP model trained by Elastic
-that enables you to perform semantic search by using sparse vector
-representation. Instead of literal matching on search terms, semantic search
-retrieves results based on the intent and the contextual meaning of a search
+Elastic Learned Sparse EncodeR - or ELSER - is an NLP model trained by Elastic
+that enables you to perform semantic search by using sparse vector
+representation. Instead of literal matching on search terms, semantic search
+retrieves results based on the intent and the contextual meaning of a search
 query.

-The instructions in this tutorial shows you how to use ELSER to perform semantic
+The instructions in this tutorial show you how to use ELSER to perform semantic
 search on your data.

-NOTE: Only the first 512 extracted tokens per field are considered during
-semantic search with ELSER. Refer to
-{ml-docs}/ml-nlp-limitations.html#ml-nlp-elser-v1-limit-512[this page] for more
+NOTE: Only the first 512 extracted tokens per field are considered during
+semantic search with ELSER. Refer to
+{ml-docs}/ml-nlp-limitations.html#ml-nlp-elser-v1-limit-512[this page] for more
 information.

 [discrete]
 [[requirements]]
 ==== Requirements

-To perform semantic search by using ELSER, you must have the NLP model deployed
-in your cluster. Refer to the
-{ml-docs}/ml-nlp-elser.html[ELSER documentation] to learn how to download and
+To perform semantic search by using ELSER, you must have the NLP model deployed
+in your cluster. Refer to the
+{ml-docs}/ml-nlp-elser.html[ELSER documentation] to learn how to download and
+deploy the model.
-NOTE: The minimum dedicated ML node size for deploying and using the ELSER model
-is 4 GB in Elasticsearch Service if
-{cloud}/ec-autoscaling.html[deployment autoscaling] is turned off. Turning on
-autoscaling is recommended because it allows your deployment to dynamically
-adjust resources based on demand. Better performance can be achieved by using
-more allocations or more threads per allocation, which requires bigger ML nodes.
-Autoscaling provides bigger nodes when required. If autoscaling is turned off,
+NOTE: The minimum dedicated ML node size for deploying and using the ELSER model
+is 4 GB in Elasticsearch Service if
+{cloud}/ec-autoscaling.html[deployment autoscaling] is turned off. Turning on
+autoscaling is recommended because it allows your deployment to dynamically
+adjust resources based on demand. Better performance can be achieved by using
+more allocations or more threads per allocation, which requires bigger ML nodes.
+Autoscaling provides bigger nodes when required. If autoscaling is turned off,
 you must provide suitably sized nodes yourself.

 [discrete]
 [[elser-mappings]]
 ==== Create the index mapping

-First, the mapping of the destination index - the index that contains the tokens
-that the model created based on your text - must be created. The destination
-index must have a field with the
-<> field type to index the
+First, the mapping of the destination index - the index that contains the tokens
+that the model created based on your text - must be created. The destination
+index must have a field with the
+<> or <> field type to index the
 ELSER output.

-NOTE: ELSER output must be ingested into a field with the `sparse_vector` or
-`rank_features` field type. Otherwise, {es} interprets the token-weight pairs as
-a massive amount of fields in a document. If you get an error similar to this
-`"Limit of total fields [1000] has been exceeded while adding new fields"` then
-the ELSER output field is not mapped properly and it has a field type different
+NOTE: ELSER output must be ingested into a field with the `sparse_vector` or
+`rank_features` field type. Otherwise, {es} interprets the token-weight pairs as
+a massive amount of fields in a document. If you get an error similar to this
+`"Limit of total fields [1000] has been exceeded while adding new fields"` then
+the ELSER output field is not mapped properly and it has a field type different
 than `sparse_vector` or `rank_features`.

 [source,console]
 ----
 PUT my-index
 {
   "mappings": {
     "properties": {
       "ml.tokens": { <1>
         "type": "sparse_vector" <2>
       },
       "text": { <3>
         "type": "text" <4>
       }
     }
   }
 }
 ----
 // TEST[skip:TBD]
 <1> The name of the field to contain the generated tokens.
 <2> The field to contain the tokens is a `sparse_vector` field.
-<3> The name of the field from which to create the sparse vector representation.
+<3> The name of the field from which to create the sparse vector representation.
 In this example, the name of the field is `text`.
 <4> The field type which is text in this example.

-To learn how to optimize space, refer to the <> section.
+To learn how to optimize space, refer to the <> section.

 [discrete]
 [[inference-ingest-pipeline]]
 ==== Create an ingest pipeline with an inference processor

-Create an <> with an
-<> to use ELSER to infer against the data
+Create an <> with an
+<> to use ELSER to infer against the data
+that is being ingested in the pipeline.
 [source,console]
 ----
 PUT _ingest/pipeline/elser-v2-test
 }
 ----
 // TEST[skip:TBD]
-<1> The `field_map` object maps the input document field name (which is `text`
-in this example) to the name of the field that the model expects (which is
+<1> The `field_map` object maps the input document field name (which is `text`
+in this example) to the name of the field that the model expects (which is
 always `text_field`).
-<2> The `text_expansion` inference type needs to be used in the {infer} ingest
+<2> The `text_expansion` inference type needs to be used in the {infer} ingest
 processor.

@@ -123,19 +123,19 @@ processor.
 [discrete]
 [[load-data]]
 ==== Load data

-In this step, you load the data that you later use in the {infer} ingest
+In this step, you load the data that you later use in the {infer} ingest
 pipeline to extract tokens from it.

-Use the `msmarco-passagetest2019-top1000` data set, which is a subset of the MS
-MARCO Passage Ranking data set. It consists of 200 queries, each accompanied by
-a list of relevant text passages. All unique passages, along with their IDs,
-have been extracted from that data set and compiled into a
+Use the `msmarco-passagetest2019-top1000` data set, which is a subset of the MS
+MARCO Passage Ranking data set. It consists of 200 queries, each accompanied by
+a list of relevant text passages. All unique passages, along with their IDs,
+have been extracted from that data set and compiled into a
 https://github.com/elastic/stack-docs/blob/main/docs/en/stack/ml/nlp/data/msmarco-passagetest2019-unique.tsv[tsv file].

-Download the file and upload it to your cluster using the
-{kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[Data Visualizer]
-in the {ml-app} UI. Assign the name `id` to the first column and `text` to the
-second column. The index name is `test-data`. Once the upload is complete, you
+Download the file and upload it to your cluster using the
+{kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[Data Visualizer]
+in the {ml-app} UI. Assign the name `id` to the first column and `text` to the
+second column. The index name is `test-data`. Once the upload is complete, you
 can see an index named `test-data` with 182469 documents.

 [discrete]
 [[reindexing-data-elser]]
 ==== Ingest the data through the {infer} ingest pipeline

-Create the tokens from the text by reindexing the data throught the {infer}
+Create the tokens from the text by reindexing the data through the {infer}
 pipeline that uses ELSER as the inference model.

 [source,console]
 ----
 POST _reindex?wait_for_completion=false
 {
   "source": {
     "index": "test-data",
     "size": 50 <1>
   },
   "dest": {
     "index": "my-index",
     "pipeline": "elser-v2-test"
   }
 }
 ----
 // TEST[skip:TBD]
-<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller
-number makes the update of the reindexing process quicker which enables you to
+<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller
+number makes the update of the reindexing process quicker which enables you to
 follow the progress closely and detect errors early.

 The call returns a task ID to monitor the progress:

 [source,console]
 ----
 GET _tasks/
 ----
 // TEST[skip:TBD]

 You can also open the Trained Models UI, select the Pipelines tab under ELSER to
 follow the progress.
 [[text-expansion-query]]
 ==== Semantic search by using the `text_expansion` query

-To perform semantic search, use the `text_expansion` query, and provide the
-query text and the ELSER model ID. The example below uses the query text "How to
-avoid muscle soreness after running?", the `ml.tokens` field contains the
+To perform semantic search, use the `text_expansion` query, and provide the
+query text and the ELSER model ID. The example below uses the query text "How to
+avoid muscle soreness after running?", the `ml.tokens` field contains the
 generated ELSER output:

 [source,console]
 ----
 GET my-index/_search
 {
    "query":{
       "text_expansion":{
          "ml.tokens":{
             "model_id":".elser_model_2",
             "model_text":"How to avoid muscle soreness after running?"
          }
       }
    }
 }
 ----
 // TEST[skip:TBD]

-The result is the top 10 documents that are closest in meaning to your query
-text from the `my-index` index sorted by their relevancy. The result also
-contains the extracted tokens for each of the relevant search results with their
+The result is the top 10 documents that are closest in meaning to your query
+text from the `my-index` index sorted by their relevancy. The result also
+contains the extracted tokens for each of the relevant search results with their
 weights.

 [source,consol-result]
 ----
 {
    (...)
 }
 ----
 // NOTCONSOLE

-To learn about optimizing your `text_expansion` query, refer to
+To learn about optimizing your `text_expansion` query, refer to
 <>.

 [discrete]
 [[text-expansion-compound-query]]
 ==== Combining semantic search with other queries

-You can combine `text_expansion` with other queries in a
-<>. For example using a filter clause in a
-<> or a full text query which may or may not use the same
-query text as the `text_expansion` query. This enables you to combine the search
+You can combine `text_expansion` with other queries in a
+<>. For example using a filter clause in a
+<> or a full text query which may or may not use the same
+query text as the `text_expansion` query. This enables you to combine the search
 results from both queries.

-The search hits from the `text_expansion` query tend to score higher than other
-{es} queries. Those scores can be regularized by increasing or decreasing the
-relevance scores of each query by using the `boost` parameter. Recall on the
-`text_expansion` query can be high where there is a long tail of less relevant
+The search hits from the `text_expansion` query tend to score higher than other
+{es} queries. Those scores can be regularized by increasing or decreasing the
+relevance scores of each query by using the `boost` parameter. Recall on the
+`text_expansion` query can be high where there is a long tail of less relevant
 results. Use the `min_score` parameter to prune those less relevant documents.

 [source,console]
 ----
 GET my-index/_search
 {
   "query": {
     "bool": { <1>
       "should": [
         {
-          "text_expansion": {
+          "text_expansion": {
             "ml.tokens": {
               "model_text": "How to avoid muscle soreness after running?",
               "model_id": ".elser_model_2",
@@ -295,13 +295,13 @@ GET my-index/_search
 }
 ----
 // TEST[skip:TBD]
-<1> Both the `text_expansion` and the `query_string` queries are in a `should`
+<1> Both the `text_expansion` and the `query_string` queries are in a `should`
 clause of a `bool` query.
-<2> The `boost` value is `1` for the `text_expansion` query which is the default
-value. This means that the relevance score of the results of this query are not
+<2> The `boost` value is `1` for the `text_expansion` query which is the default
+value. This means that the relevance score of the results of this query are not
 boosted.
-<3> The `boost` value is `4` for the `query_string` query. The relevance score
-of the results of this query is increased causing them to rank higher in the
+<3> The `boost` value is `4` for the `query_string` query. The relevance score
+of the results of this query is increased causing them to rank higher in the
 search results.
 <4> Only the results with a score equal to or higher than `10` are displayed.

 [discrete]
 [[save-space]]
 ==== Saving disk space by excluding the ELSER tokens from document source

-The tokens generated by ELSER must be indexed for use in the
-<>. However, it is not
-necessary to retain those terms in the document source. You can save disk space
-by using the <> mapping to remove the ELSER
-terms from the document source.
-
-WARNING: Reindex uses the document source to populate the destination index.
-Once the ELSER terms have been excluded from the source, they cannot be
-recovered through reindexing. Excluding the tokens from the source is a
-space-saving optimsation that should only be applied if you are certain that
-reindexing will not be required in the future! It's important to carefully
-consider this trade-off and make sure that excluding the ELSER terms from the
+The tokens generated by ELSER must be indexed for use in the
+<>. However, it is not
+necessary to retain those terms in the document source. You can save disk space
+by using the <> mapping to remove the ELSER
+terms from the document source.
+
+WARNING: Reindex uses the document source to populate the destination index.
+Once the ELSER terms have been excluded from the source, they cannot be
+recovered through reindexing. Excluding the tokens from the source is a
+space-saving optimisation that should only be applied if you are certain that
+reindexing will not be required in the future! It's important to carefully
+consider this trade-off and make sure that excluding the ELSER terms from the
 source aligns with your specific requirements and use case.

-The mapping that excludes `ml.tokens` from the `_source` field can be created
-by the following API call:
+The mapping that excludes `ml.tokens` from the `_source` field can be created
+by the following API call:

 [source,console]
 ----
 PUT my-index
 {
   "mappings": {
     "_source": {
       "excludes": [
         "ml.tokens"
       ]
     },
     "properties": {
       "ml.tokens": {
-        "type": "sparse_vector"
+        "type": "sparse_vector"
       },
-      "text": {
-        "type": "text"
+      "text": {
+        "type": "text"
       }
     }
   }
diff --git a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc
index 0228078e8ce39..c93ddc3a803a7 100644
--- a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc
+++ b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc
@@ -1,14 +1,14 @@
 // tag::elser[]

-ELSER produces token-weight pairs as output from the input text and the query.
-The {es} <> field type can store these
-token-weight pairs as numeric feature vectors. The index must have a field with
+ELSER produces token-weight pairs as output from the input text and the query.
+The {es} <> field type can store these
+token-weight pairs as numeric feature vectors. The index must have a field with
 the `sparse_vector` field type to index the tokens that ELSER generates.

-To create a mapping for your ELSER index, refer to the
-<> of the tutorial. The example
-shows how to create an index mapping for `my-index` that defines the
-`my_embeddings.tokens` field - which will contain the ELSER output - as a
+To create a mapping for your ELSER index, refer to the
+<> of the tutorial. The example
+shows how to create an index mapping for `my-index` that defines the
+`my_embeddings.tokens` field - which will contain the ELSER output - as a
 `sparse_vector` field.

 [source,console]
 ----
 PUT my-index
 {
   "mappings": {
     "properties": {
       "my_embeddings.tokens": { <1>
         "type": "sparse_vector" <2>
       },
       "my_text_field": { <3>
         "type": "text" <4>
       }
     }
   }
 }
 ----
 <1> The name of the field that will contain the tokens generated by ELSER.
 <2> The field that contains the tokens must be a `sparse_vector` field.
-<3> The name of the field from which to create the sparse vector representation.
+<3> The name of the field from which to create the sparse vector representation.
 In this example, the name of the field is `my_text_field`.
 <4> The field type is `text` in this example.

 // end::elser[]

 // tag::dense-vector[]

-The models compatible with {es} NLP generate dense vectors as output. The
-<> field type is suitable for storing dense vectors
-of numeric values. The index must have a field with the `dense_vector` field
-type to index the embeddings that the supported third-party model that you
-selected generates. Keep in mind that the model produces embeddings with a
-certain number of dimensions. The `dense_vector` field must be configured with
-the same number of dimensions using the `dims` option. Refer to the respective
-model documentation to get information about the number of dimensions of the
+The models compatible with {es} NLP generate dense vectors as output. The
+<> field type is suitable for storing dense vectors
+of numeric values. The index must have a field with the `dense_vector` field
+type to index the embeddings that the supported third-party model that you
+selected generates. Keep in mind that the model produces embeddings with a
+certain number of dimensions. The `dense_vector` field must be configured with
+the same number of dimensions using the `dims` option. Refer to the respective
+model documentation to get information about the number of dimensions of the
 embeddings.

-To review a mapping of an index for an NLP model, refer to the mapping code
-snippet in the
-{ml-docs}/ml-nlp-text-emb-vector-search-example.html#ex-text-emb-ingest[Add the text embedding model to an ingest inference pipeline]
-section of the tutorial. The example shows how to create an index mapping that
-defines the `my_embeddings.predicted_value` field - which will contain the model
+To review a mapping of an index for an NLP model, refer to the mapping code
+snippet in the
+{ml-docs}/ml-nlp-text-emb-vector-search-example.html#ex-text-emb-ingest[Add the text embedding model to an ingest inference pipeline]
+section of the tutorial. The example shows how to create an index mapping that
+defines the `my_embeddings.predicted_value` field - which will contain the model
 output - as a `dense_vector` field.

 [source,console]
 ----
 PUT my-index
 {
   "mappings": {
     "properties": {
       "my_embeddings.predicted_value": { <1>
         "type": "dense_vector", <2>
         "dims": 384 <3>
       },
       "my_text_field": { <4>
         "type": "text" <5>
       }
     }
   }
 }
 ----
-<1> The name of the field that will contain the embeddings generated by the
+<1> The name of the field that will contain the embeddings generated by the
 model.
 <2> The field that contains the embeddings must be a `dense_vector` field.
-<3> The model produces embeddings with a certain number of dimensions. The
-`dense_vector` field must be configured with the same number of dimensions by
-the `dims` option. Refer to the respective model documentation to get
+<3> The model produces embeddings with a certain number of dimensions. The
+`dense_vector` field must be configured with the same number of dimensions by
+the `dims` option. Refer to the respective model documentation to get
 information about the number of dimensions of the embeddings.
-<4> The name of the field from which to create the dense vector representation.
+<4> The name of the field from which to create the dense vector representation.
 In this example, the name of the field is `my_text_field`.
 <5> The field type is `text` in this example.

-// end::dense-vector[]
\ No newline at end of file
+// end::dense-vector[]

From 3c12c31f3bfb427f73b8ab53727acd4b7fdd9a10 Mon Sep 17 00:00:00 2001
From: Alexander Spies
Date: Fri, 6 Oct 2023 15:46:30 +0200
Subject: [PATCH 026/176] ESQL: Improve agg verification (#99827)

When verifying aggregation expressions like
from employees | stats percentile(salary_change, 25*2),
both arguments are treated the same way during verification. This is
incorrect, as salary_change is the actual aggregation's field, while
25*2 is merely its (first and only) parameter. This is overly
restrictive.

Apply the current verification only to the aggregation's actual field,
as the parameter is already verified during type resolution (it needs
to be a constant expression).
---
 docs/changelog/99827.yaml                     |  5 +++
 .../resources/stats_count_distinct.csv-spec   | 26 +++++++++++++-
 .../main/resources/stats_percentile.csv-spec  | 14 ++++++++
 .../xpack/esql/analysis/Verifier.java         | 36 +++++++++----------
 .../function/aggregate/CountDistinct.java     |  3 +-
 .../function/aggregate/Percentile.java        |  6 +---
 .../xpack/esql/analysis/VerifierTests.java    | 12 +++++--
 .../xpack/ql/expression/Expression.java       |  4 +++
 8 files changed, 79 insertions(+), 27 deletions(-)
 create mode 100644 docs/changelog/99827.yaml

diff --git a/docs/changelog/99827.yaml b/docs/changelog/99827.yaml
new file mode 100644
index 0000000000000..3e6690a8e9e68
--- /dev/null
+++ b/docs/changelog/99827.yaml
@@ -0,0 +1,5 @@
+pr: 99827
+summary: "ESQL: Fix NPE when aggregating literals"
+area: ES|QL
+type: bug
+issues: []
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec
index 462045d9968ee..68f67b8a2743b 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec
@@ -117,7 +117,7 @@ m:long | languages:i
 15 | 4
 20 | 5
 10 | null
-;
+;

 countDistinctOfIpGroupByKeyword
 from hosts | stats h0 = count_distinct(ip0), h1 = count_distinct(ip1) by host | sort host;

 h0:long | h1:long | host:keyword
 5 | 6 | epsilon
 1 | 2 | gamma
 ;
+
+countDistinctWithPrecisionExpression
+from employees | stats m = count_distinct(height, 9875+1) by languages | sort languages;
+
+m:long | languages:i
+13 | 1
+16 | 2
+14 | 3
+15 | 4
+20 | 5
+10 | null
+;
+
+countDistinctWithComplexPrecisionExpression
+from employees | stats m = count_distinct(height, 9876*3+(-9876*2)) by languages | sort languages;
+
+m:long | languages:i
+13 | 1
+16 | 2
+14 | 3
+15 | 4
+20 | 5
+10 | null
+;
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec
index eaa0786588480..6ab061b33dfb0 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec
@@ -142,3 +142,17 @@ MEDIAN(salary):double | MEDIAN_ABSOLUTE_DEVIATION(salary):double
 47003 | 10096.5
 // end::median-absolute-deviation-result[]
 ;
+
+medianViaExpression
+from employees | stats p50 = percentile(salary_change, 25*2);
+
+p50:double
+0.75
+;
+
+medianViaComplexExpression
+from employees | stats p50 = percentile(salary_change, -(50-1)+99);
+
+p50:double
+0.75
+;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java
index 1f5e6ee3fd6ed..59c6e2782b014 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java
@@ -125,24 +125,24 @@ else if (p.resolved()) {
             agg.aggregates().forEach(e -> {
                 var exp = e instanceof Alias ? ((Alias) e).child() : e;
                 if (exp instanceof AggregateFunction aggFunc) {
-                    aggFunc.arguments().forEach(a -> {
-                        // TODO: allow an expression?
-                        if ((a instanceof FieldAttribute
-                            || a instanceof MetadataAttribute
-                            || a instanceof ReferenceAttribute
-                            || a instanceof Literal) == false) {
-                            failures.add(
-                                fail(
-                                    e,
-                                    "aggregate function's parameters must be an attribute or literal; found ["
-                                        + a.sourceText()
-                                        + "] of type ["
-                                        + a.nodeName()
-                                        + "]"
-                                )
-                            );
-                        }
-                    });
+                    Expression field = aggFunc.field();
+
+                    // TODO: allow an expression?
+                    if ((field instanceof FieldAttribute
+                        || field instanceof MetadataAttribute
+                        || field instanceof ReferenceAttribute
+                        || field instanceof Literal) == false) {
+                        failures.add(
+                            fail(
+                                e,
+                                "aggregate function's field must be an attribute or literal; found ["
+                                    + field.sourceText()
+                                    + "] of type ["
+                                    + field.nodeName()
+                                    + "]"
+                            )
+                        );
+                    }
                 } else if (agg.groupings().contains(exp) == false) {
                     // TODO: allow an expression?
                    failures.add(
                        fail(
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java
index 044d89d41a0c5..2e20af1889773 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java
@@ -29,6 +29,7 @@

 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isFoldable;
 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger;

 public class CountDistinct extends AggregateFunction implements OptionalArgument, ToAggregator {
@@ -66,7 +67,7 @@ protected TypeResolution resolveType() {
             return resolution;
         }

-        return isInteger(precision, sourceText(), SECOND);
+        return isInteger(precision, sourceText(), SECOND).and(isFoldable(precision, sourceText(), SECOND));
     }

     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java
index db560ff7043df..9e4eccb964de4 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java
@@ -56,11 +56,7 @@ protected TypeResolution resolveType() {
             return resolution;
         }

-        resolution = isNumeric(percentile, sourceText(), SECOND);
-        if (resolution.unresolved()) {
-            return resolution;
-        }
-        return isFoldable(percentile, sourceText(), SECOND);
+        return isNumeric(percentile, sourceText(), SECOND).and(isFoldable(percentile, sourceText(), SECOND));
     }

     @Override
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
index 3c1a9800d6d11..10f134432a0a2 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java
@@ -68,7 +68,7 @@ public void testAggsExpressionsInStatsAggs() {
             error("from test | stats length(first_name), count(1) by first_name")
         );
         assertEquals(
-            "1:19: aggregate function's parameters must be an attribute or literal; found [emp_no / 2] of type [Div]",
+            "1:19: aggregate function's field must be an attribute or literal; found [emp_no / 2] of type [Div]",
             error("from test | stats x = avg(emp_no / 2) by emp_no")
         );
         assertEquals(
@@ -76,13 +76,21 @@
             error("from test | stats count(avg(first_name)) by first_name")
         );
         assertEquals(
-            "1:19: aggregate function's parameters must be an attribute or literal; found [length(first_name)] of type [Length]",
+            "1:19: aggregate function's field must be an attribute or literal; found [length(first_name)] of type [Length]",
             error("from test | stats count(length(first_name)) by first_name")
         );
         assertEquals(
             "1:23: expected an aggregate function or group but got [emp_no + avg(emp_no)] of type [Add]",
             error("from test | stats x = emp_no + avg(emp_no) by emp_no")
         );
+        assertEquals(
+            "1:23: second argument of [percentile(languages, languages)] must be a constant, received [languages]",
+            error("from test | stats x = percentile(languages, languages) by emp_no")
+        );
+        assertEquals(
+            "1:23: second argument of [count_distinct(languages, languages)] must be a constant, received [languages]",
+            error("from test | stats x = count_distinct(languages, languages) by emp_no")
+        );
     }

     public void testDoubleRenamingField() {
diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java
index 5f56694934f1b..9e95dab82df19 100644
--- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java
+++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/Expression.java
@@ -50,6 +50,10 @@ public boolean resolved() {
         return failed == false;
     }

+    public TypeResolution and(TypeResolution other) {
+        return failed ? this : other;
+    }
+
     public String message() {
         return message;
     }

From a5a7c0e8fd2e819c3cbefa66fa537efdace474cc Mon Sep 17 00:00:00 2001
From: Andrei Stefan
Date: Fri, 6 Oct 2023 17:09:49 +0300
Subject: [PATCH 027/176] ESQL: support metric tsdb fields while querying index patterns (#100351)

* Mark incompatible metric fields as incompatible types in QL
* Added tests for SQL and EQL with conflicting tsdb fields
---
 docs/changelog/100351.yaml                    |   6 +
 .../rest-api-spec/test/eql/40_tsdb.yml        | 174 ++++++++++++++
 .../resources/rest-api-spec/test/40_tsdb.yml  |  55 ++++-
 .../esql/enrich/EnrichPolicyResolver.java     |   4 +-
 .../xpack/esql/session/EsqlSession.java       |  37 ++-
 .../esql/type/EsqlDataTypeRegistryTests.java  |   8 +-
 .../xpack/ql/index/IndexResolver.java         |  43 +++-
 .../xpack/sql/qa/rest/RestSqlTestCase.java    | 220 ++++++++++++++++++
 8 files changed, 534 insertions(+), 13 deletions(-)
 create mode 100644 docs/changelog/100351.yaml
 create mode 100644 x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/40_tsdb.yml

diff --git a/docs/changelog/100351.yaml b/docs/changelog/100351.yaml
new file mode 100644
index 0000000000000..d8ba19b70cbed
--- /dev/null
+++ b/docs/changelog/100351.yaml
@@ -0,0 +1,6 @@
+pr: 100351
+summary: "ESQL: support metric tsdb fields while querying index patterns"
+area: ES|QL
+type: bug
+issues:
+ - 100144
diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/40_tsdb.yml b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/40_tsdb.yml
new file mode 100644
index 0000000000000..1eba314502337
--- /dev/null
+++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/40_tsdb.yml
@@ -0,0 +1,174 @@
+setup:
+  - do:
+      indices.create:
+        index: test1
+        body:
+          settings:
+            index:
+              mode: time_series
+              routing_path: [metricset, k8s.pod.uid]
+              time_series:
+                start_time: 2021-04-28T00:00:00Z
+                end_time: 2021-04-29T00:00:00Z
+          mappings:
+            properties:
+              "@timestamp":
+                type: date
+              event.category:
+                type: keyword
+              metricset:
+                type: keyword
+                time_series_dimension: true
+              k8s:
+                properties:
+                  pod:
+                    properties:
+                      uid:
+                        type: keyword
+                        time_series_dimension: true
+                      name:
+                        type: keyword
+                      ip:
+                        type: ip
+                      network:
+                        properties:
+                          tx:
+                            type: long
+                            time_series_metric: counter
+                          rx:
+                            type: long
+                            time_series_metric: counter
+  - do:
+      bulk:
+        refresh: true
+        index: test1
+        body:
+          - '{"index": {}}'
+          - '{"id":1, "@timestamp": "2021-04-28T18:50:04.467Z", "event.category": "process", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}'
+          - '{"index": {}}'
+          - '{"id":2, "@timestamp": "2021-04-28T18:50:24.467Z", "event.category": "process", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}'
+          - '{"index": {}}'
+          - '{"id":3, "@timestamp": "2021-04-28T18:50:44.467Z", "event.category": "process", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}'
+          - '{"index": {}}'
+          - '{"id":4, "@timestamp": "2021-04-28T18:51:04.467Z", "event.category": "process", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}'
+          - '{"index": {}}'
+          - '{"id":5, "@timestamp": "2021-04-28T18:50:03.142Z", "event.category": "process", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}'
+          - '{"index": {}}'
+          - '{"id":6, "@timestamp": "2021-04-28T18:50:23.142Z", "event.category": "process", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}'
+          - '{"index": {}}'
+          - '{"id":7, "@timestamp": "2021-04-28T18:50:53.142Z", "event.category": "network", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}'
+          - '{"index": {}}'
+          - '{"id":8, "@timestamp": "2021-04-28T18:51:03.142Z", "event.category": "network", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}'
+
+  - do:
+      indices.create:
+        index: test2
+        body:
+          settings:
+            index:
+              mode: time_series
+              routing_path: [ dim ]
+              time_series:
+                start_time: 2021-04-28T00:00:00Z
+                end_time: 2021-04-29T00:00:00Z
+          mappings:
+            properties:
+              "@timestamp":
+                type: date
+              event.category:
+                type: keyword
+              dim:
+                type: keyword
+                time_series_dimension: true
+              agg_metric:
+                type: aggregate_metric_double
+                metrics:
+                  - max
+                default_metric: max
+              k8s:
+                properties:
+                  pod:
+                    properties:
+                      ip:
+                        type: ip
+                      network:
+                        properties:
+                          tx:
+                            type: long
+  - do:
+      bulk:
+        refresh: true
+        index: test2
+        body:
+          - '{"index": {}}'
+          - '{"id":100, "@timestamp": "2021-04-28T18:49:04.467Z", "event.category": "network", "dim": "A", "agg_metric": {"max": 10}}'
+          - '{"index": {}}'
+          - '{"id":200, "@timestamp": "2021-04-28T18:49:24.467Z", "event.category": "process", "dim": "B", "agg_metric": {"max": 20}, "k8s.pod.network.tx": 2000000001}'
+          - '{"index": {}}'
+          - '{"id":300, "@timestamp": "2021-04-28T18:49:34.467Z", "event.category": "process", "dim": "B", "agg_metric": {"max": 20}, "k8s.pod.network.tx": 1}'
+
+---
+test* where counter:
+  - do:
+      eql.search:
+        index: test*
+        body:
+          query: 'process where k8s.pod.network.tx > 2000000000'
+
+  - match: {timed_out: false}
+  - match: {hits.total.value: 5}
+  - match: {hits.total.relation: "eq"}
+  - match: {hits.events.0._source.id: 200}
+  - match: {hits.events.1._source.id: 1}
+  - match: {hits.events.2._source.id: 2}
+ - match: {hits.events.3._source.id: 3} + - match: {hits.events.4._source.id: 4} + +--- +test1 where counter: + - do: + eql.search: + index: test1 + body: + query: 'process where k8s.pod.network.tx > 2000000000' + + - match: {timed_out: false} + - match: {hits.total.value: 4} + - match: {hits.total.relation: "eq"} + - match: {hits.events.0._source.id: 1} + - match: {hits.events.1._source.id: 2} + - match: {hits.events.2._source.id: 3} + - match: {hits.events.3._source.id: 4} + +--- +test2 where counter: + - do: + eql.search: + index: test2 + body: + query: 'process where k8s.pod.network.tx > 2000000000' + + - match: {timed_out: false} + - match: {hits.total.value: 1} + - match: {hits.total.relation: "eq"} + - match: {hits.events.0._source.id: 200} + +--- +test* where true: + - do: + eql.search: + index: test* + body: + query: 'process where true' + + - match: {timed_out: false} + - match: {hits.total.value: 8} + - match: {hits.total.relation: "eq"} + - match: {hits.events.0._source.id: 200} + - match: {hits.events.1._source.id: 300} + - match: {hits.events.2._source.id: 5} + - match: {hits.events.3._source.id: 1} + - match: {hits.events.4._source.id: 6} + - match: {hits.events.5._source.id: 2} + - match: {hits.events.6._source.id: 3} + - match: {hits.events.7._source.id: 4} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml index 2e8c43379d690..14ae1ff98d8ad 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml @@ -83,10 +83,20 @@ setup: metrics: - max default_metric: max + k8s: + properties: + pod: + properties: + ip: + type: ip + network: + properties: + tx: + type: long - do: bulk: refresh: true - index: test + index: test2 body: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": "A", "agg_metric": {"max": 10}}' @@ -156,7 +166,11 @@ from doc with aggregate_metric_double: - match: {columns.1.type: "unsupported"} - match: {columns.2.name: "dim"} - match: {columns.2.type: "keyword"} - - length: {values: 0} + - match: {columns.3.name: "k8s.pod.ip"} + - match: {columns.3.type: "ip"} + - match: {columns.4.name: "k8s.pod.network.tx"} + - match: {columns.4.type: "long"} + - length: {values: 2} --- stats on aggregate_metric_double: @@ -164,4 +178,39 @@ stats on aggregate_metric_double: catch: /Cannot use field \[agg_metric\] with unsupported type \[aggregate_metric_double\]/ esql.query: body: - query: 'FROM test2 | STATS max(agg_metric) BY dim ' + query: 'FROM test2 | STATS max(agg_metric) BY dim' + +--- +from index pattern unsupported counter: + - do: + esql.query: + body: + query: 'FROM test*' + + - match: {columns.0.name: "@timestamp"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "agg_metric"} + - match: {columns.1.type: "unsupported"} + - match: {columns.2.name: "dim"} + - match: {columns.2.type: "keyword"} + - match: {columns.3.name: "k8s.pod.ip"} + - match: {columns.3.type: "ip"} + - match: {columns.4.name: "k8s.pod.name"} + - match: {columns.4.type: "keyword"} + - match: {columns.5.name: "k8s.pod.network.rx"} + - match: {columns.5.type: "unsupported"} + - match: {columns.6.name: "k8s.pod.network.tx"} + - match: {columns.6.type: "unsupported"} + - match: {columns.7.name: "k8s.pod.uid"} + - match: {columns.7.type: "keyword"} + - 
match: {columns.8.name: "metricset"} + - match: {columns.8.type: "keyword"} + - length: {values: 10} + +--- +from index pattern explicit counter use: + - do: + catch: '/Cannot use field \[k8s.pod.network.tx\] due to ambiguities being mapped as different metric types in indices: \[test, test2\]/' + esql.query: + body: + query: 'FROM test* | keep *.tx' diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index cdafee6d76ef0..1e21886a7ac4b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.core.enrich.EnrichMetadata; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.ql.index.IndexResolver; import java.util.Map; @@ -117,7 +118,8 @@ public void messageReceived(ResolveRequest request, TransportChannel channel, Ta IndexResolver.ALL_FIELDS, false, Map.of(), - listener.map(indexResult -> new ResolveResponse(new EnrichPolicyResolution(policyName, policy, indexResult))) + listener.map(indexResult -> new ResolveResponse(new EnrichPolicyResolution(policyName, policy, indexResult))), + EsqlSession::specificValidity ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 1cde7857310ae..7ce56ae537394 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.session; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -46,6 +47,7 @@ import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.plan.logical.Project; +import org.elasticsearch.xpack.ql.type.InvalidMappedField; import org.elasticsearch.xpack.ql.util.Holder; import java.util.HashSet; @@ -57,6 +59,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.xpack.ql.index.IndexResolver.UNMAPPED; import static org.elasticsearch.xpack.ql.util.ActionListeners.map; import static org.elasticsearch.xpack.ql.util.StringUtils.WILDCARD; @@ -173,7 +176,7 @@ private void preAnalyzeIndices(LogicalPlan parsed, ActionListener types) { + boolean hasUnmapped = types.containsKey(UNMAPPED); + boolean hasTypeConflicts = types.size() > (hasUnmapped ? 
2 : 1); + String metricConflictsTypeName = null; + boolean hasMetricConflicts = false; + + if (hasTypeConflicts == false) { + for (Map.Entry type : types.entrySet()) { + if (UNMAPPED.equals(type.getKey())) { + continue; + } + if (type.getValue().metricConflictsIndices() != null && type.getValue().metricConflictsIndices().length > 0) { + hasMetricConflicts = true; + metricConflictsTypeName = type.getKey(); + break; + } + } + } + + InvalidMappedField result = null; + if (hasMetricConflicts) { + StringBuilder errorMessage = new StringBuilder(); + errorMessage.append( + "mapped as different metric types in indices: [" + + String.join(", ", types.get(metricConflictsTypeName).metricConflictsIndices()) + + "]" + ); + result = new InvalidMappedField(fieldName, errorMessage.toString()); + } + return result; + }; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java index 3620fcc8c5926..fed396523dd9b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistryTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.ql.index.IndexResolution; import org.elasticsearch.xpack.ql.index.IndexResolver; import org.elasticsearch.xpack.ql.type.DataType; @@ -51,7 +52,12 @@ private void resolve(String esTypeName, TimeSeriesParams.MetricType metricType, Map.of() ); FieldCapabilitiesResponse caps = new FieldCapabilitiesResponse(indices, Map.of(fieldCap.getName(), Map.of(esTypeName, fieldCap))); - IndexResolution resolution = IndexResolver.mergedMappings(EsqlDataTypeRegistry.INSTANCE, "idx-*", caps); + IndexResolution resolution = IndexResolver.mergedMappings( + EsqlDataTypeRegistry.INSTANCE, + "idx-*", + caps, + EsqlSession::specificValidity + ); EsField f = resolution.get().mapping().get(fieldCap.getName()); assertThat(f.getDataType(), equalTo(expected)); diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index c0c3068b4d98b..1fae7173b0e7b 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -134,7 +134,7 @@ public String toString() { ); public static final Set ALL_FIELDS = Set.of("*"); - private static final String UNMAPPED = "unmapped"; + public static final String UNMAPPED = "unmapped"; private final Client client; private final String clusterName; @@ -339,18 +339,35 @@ public void resolveAsMergedMapping( boolean includeFrozen, Map runtimeMappings, ActionListener listener + ) { + resolveAsMergedMapping(indexWildcard, fieldNames, includeFrozen, runtimeMappings, listener, (fieldName, types) -> null); + } + + /** + * Resolves a pattern to one (potentially compound meaning that spawns multiple indices) mapping. 
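+     * The {@code specificValidityVerifier} hook lets callers flag additional per-field conflicts
+     * (for example, fields mapped as different time-series metric types across indices) as an
+     * {@link InvalidMappedField} before the shared type and aggregation checks run.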
+ */ + public void resolveAsMergedMapping( + String indexWildcard, + Set fieldNames, + boolean includeFrozen, + Map runtimeMappings, + ActionListener listener, + BiFunction, InvalidMappedField> specificValidityVerifier ) { FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, fieldNames, includeFrozen, runtimeMappings); client.fieldCaps( fieldRequest, - listener.delegateFailureAndWrap((l, response) -> l.onResponse(mergedMappings(typeRegistry, indexWildcard, response))) + listener.delegateFailureAndWrap( + (l, response) -> l.onResponse(mergedMappings(typeRegistry, indexWildcard, response, specificValidityVerifier)) + ) ); } public static IndexResolution mergedMappings( DataTypeRegistry typeRegistry, String indexPattern, - FieldCapabilitiesResponse fieldCapsResponse + FieldCapabilitiesResponse fieldCapsResponse, + BiFunction, InvalidMappedField> specificValidityVerifier ) { if (fieldCapsResponse.getIndices().length == 0) { @@ -358,9 +375,13 @@ public static IndexResolution mergedMappings( } // merge all indices onto the same one - List indices = buildIndices(typeRegistry, null, fieldCapsResponse, null, i -> indexPattern, (n, types) -> { - StringBuilder errorMessage = new StringBuilder(); + List indices = buildIndices(typeRegistry, null, fieldCapsResponse, null, i -> indexPattern, (fieldName, types) -> { + InvalidMappedField f = specificValidityVerifier.apply(fieldName, types); + if (f != null) { + return f; + } + StringBuilder errorMessage = new StringBuilder(); boolean hasUnmapped = types.containsKey(UNMAPPED); if (types.size() > (hasUnmapped ? 2 : 1)) { @@ -384,7 +405,7 @@ public static IndexResolution mergedMappings( errorMessage.insert(0, "mapped as [" + (types.size() - (hasUnmapped ? 1 : 0)) + "] incompatible types: "); - return new InvalidMappedField(n, errorMessage.toString()); + return new InvalidMappedField(fieldName, errorMessage.toString()); } // type is okay, check aggregation else { @@ -404,7 +425,7 @@ public static IndexResolution mergedMappings( } if (errorMessage.length() > 0) { - return new InvalidMappedField(n, errorMessage.toString()); + return new InvalidMappedField(fieldName, errorMessage.toString()); } } @@ -428,6 +449,14 @@ public static IndexResolution mergedMappings( } } + public static IndexResolution mergedMappings( + DataTypeRegistry typeRegistry, + String indexPattern, + FieldCapabilitiesResponse fieldCapsResponse + ) { + return mergedMappings(typeRegistry, indexPattern, fieldCapsResponse, (fieldName, types) -> null); + } + private static EsField createField( DataTypeRegistry typeRegistry, String fieldName, diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index 3f8e7801118ec..e47c3afe6a776 100644 --- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -1167,6 +1167,226 @@ public void testPreventedUnsignedLongMaskedAccess() throws IOException { ); } + public void testTsdbMetrics() throws IOException { + createTsdb1(); + createTsdb2(); + + String mode = randomMode(); + // select all from tsdb1 + Map expected = new HashMap<>(); + expected.put( + "columns", + Arrays.asList( + columnInfo(mode, "@timestamp", "datetime", JDBCType.DATE, 34), + columnInfo(mode, "k8s.pod.ip", "ip", JDBCType.VARCHAR, 45), + columnInfo(mode, 
"k8s.pod.name", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "k8s.pod.network.rx", "long", JDBCType.BIGINT, 20), + columnInfo(mode, "k8s.pod.network.tx", "long", JDBCType.BIGINT, 20), + columnInfo(mode, "k8s.pod.uid", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "metricset", "keyword", JDBCType.VARCHAR, 32766) + ) + ); + expected.put( + "rows", + singletonList( + Arrays.asList( + "2021-04-28T18:50:04.467Z", + "10.10.55.1", + "cat", + 802133794, + 2001818691, + "947e4ced-1786-4e53-9e0c-5c447e959507", + "pod" + ) + ) + ); + assertResponse(expected, runSql(mode, "SELECT * FROM tsdb1", false)); + + // select all from tsdb2 + expected = new HashMap<>(); + expected.put( + "columns", + Arrays.asList( + columnInfo(mode, "@timestamp", "datetime", JDBCType.DATE, 34), + columnInfo(mode, "dim", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "k8s.pod.ip", "ip", JDBCType.VARCHAR, 45), + columnInfo(mode, "k8s.pod.network.tx", "long", JDBCType.BIGINT, 20) + ) + ); + expected.put("rows", singletonList(Arrays.asList("2021-04-28T18:50:04.467Z", "A", null, null))); + assertResponse(expected, runSql(mode, "SELECT * FROM tsdb2", false)); + + // select all from both indices + expected = new HashMap<>(); + expected.put( + "columns", + Arrays.asList( + columnInfo(mode, "@timestamp", "datetime", JDBCType.DATE, 34), + columnInfo(mode, "dim", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "k8s.pod.ip", "ip", JDBCType.VARCHAR, 45), + columnInfo(mode, "k8s.pod.name", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "k8s.pod.network.rx", "long", JDBCType.BIGINT, 20), + columnInfo(mode, "k8s.pod.network.tx", "long", JDBCType.BIGINT, 20), + columnInfo(mode, "k8s.pod.uid", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "metricset", "keyword", JDBCType.VARCHAR, 32766) + ) + ); + expected.put( + "rows", + Arrays.asList( + Arrays.asList( + "2021-04-28T18:50:04.467Z", + null, + "10.10.55.1", + "cat", + 802133794, + 2001818691, + "947e4ced-1786-4e53-9e0c-5c447e959507", + "pod" + ), + Arrays.asList("2021-04-28T18:50:04.467Z", "A", null, null, null, null, null, null) + ) + ); + assertResponse(expected, runSql(mode, "SELECT * FROM \\\"tsdb*\\\"", false)); + + // select the column that is both mapped as counter and as long + expected = new HashMap<>(); + expected.put("columns", Arrays.asList(columnInfo(mode, "k8s.pod.network.tx", "long", JDBCType.BIGINT, 20))); + expected.put("rows", Arrays.asList(singletonList(2001818691), singletonList(null))); + assertResponse(expected, runSql(mode, "SELECT k8s.pod.network.tx FROM \\\"tsdb*\\\"", false)); + + deleteIndex(client(), "tsdb1"); + deleteIndex(client(), "tsdb2"); + } + + private void createTsdb2() throws IOException { + Request request = new Request("PUT", "/tsdb2"); + request.setJsonEntity(""" + { + "settings": { + "index": { + "mode": "time_series", + "routing_path": ["dim"], + "time_series": { + "start_time": "2021-04-28T00:00:00Z", + "end_time": "2021-04-29T00:00:00Z" + } + } + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date" + }, + "dim": { + "type": "keyword", + "time_series_dimension": "true" + }, + "agg_metric": { + "type": "aggregate_metric_double", + "metrics": ["max"], + "default_metric": "max" + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "ip": { + "type": "ip" + }, + "network": { + "properties": { + "tx": { + "type": "long" + } + } + } + } + } + } + } + } + } + }"""); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/tsdb2/_doc"); + 
request.addParameter("refresh", "true"); + request.setJsonEntity("{\"@timestamp\": \"2021-04-28T18:50:04.467Z\", \"dim\": \"A\", \"agg_metric\": {\"max\": 10}}"); + client().performRequest(request); + } + + private void createTsdb1() throws IOException { + Request request = new Request("PUT", "/tsdb1"); + request.setJsonEntity(""" + { + "settings": { + "index": { + "mode": "time_series", + "routing_path": [ + "metricset", + "k8s.pod.uid" + ], + "time_series": { + "start_time": "2021-04-28T00:00:00Z", + "end_time": "2021-04-29T00:00:00Z" + } + } + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "network": { + "properties": { + "tx": { + "type": "long", + "time_series_metric": "counter" + }, + "rx": { + "type": "long", + "time_series_metric": "counter" + } + } + } + } + } + } + } + } + } + }"""); + assertOK(client().performRequest(request)); + + request = new Request("POST", "/tsdb1/_doc"); + request.addParameter("refresh", "true"); + request.setJsonEntity( + "{\"@timestamp\": \"2021-04-28T18:50:04.467Z\", \"metricset\": \"pod\"," + + "\"k8s\": {\"pod\": {\"name\": \"cat\", \"uid\":\"947e4ced-1786-4e53-9e0c-5c447e959507\", \"ip\": \"10.10.55.1\"," + + "\"network\": {\"tx\": 2001818691, \"rx\": 802133794}}}}" + ); + client().performRequest(request); + } + private void executeQueryWithNextPage(String format, String expectedHeader, String expectedLineFormat) throws IOException { int size = 20; String[] docs = new String[size]; From 3055edfa9845bc5a5d66def3f773f6faf9ac3b73 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 6 Oct 2023 15:10:28 +0100 Subject: [PATCH 028/176] [ML] Change some ML aggregation exceptions from 500 to 400 status (#100393) AggregationExecutionException maps to a 500 REST status, and should not be used for situations where end user choices (for example query timerange or parameters) caused the exception. This change converts these exceptions to IllegalArgumentException within the ML plugin. 
--- .../java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java | 3 +-- .../ml/aggs/categorization/CategorizationBytesRefHash.java | 3 +-- .../ml/aggs/correlation/BucketCorrelationAggregator.java | 3 +-- .../xpack/ml/aggs/correlation/CountCorrelationFunction.java | 5 ++--- .../elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java | 5 +---- .../xpack/ml/aggs/inference/InferencePipelineAggregator.java | 5 ++--- .../xpack/ml/aggs/kstest/BucketCountKSTestAggregator.java | 2 +- 7 files changed, 9 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java index 876474e08485a..780841880a6c1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ml.aggs; import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.InvalidAggregationPathException; @@ -76,7 +75,7 @@ public static Optional extractDoubleBucketedValues( bucketCount++; continue; } - throw new AggregationExecutionException( + throw new IllegalArgumentException( "missing or invalid bucket value found for path [" + bucketPath + "] in bucket [" diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizationBytesRefHash.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizationBytesRefHash.java index 23d95b4ba0f7f..58feb24480f87 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizationBytesRefHash.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizationBytesRefHash.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.core.Releasable; -import org.elasticsearch.search.aggregations.AggregationExecutionException; class CategorizationBytesRefHash implements Releasable { @@ -49,7 +48,7 @@ int put(BytesRef bytesRef) { return (int) (-1L - hash); } if (hash > Integer.MAX_VALUE) { - throw new AggregationExecutionException( + throw new IllegalArgumentException( LoggerMessageFormat.format( "more than [{}] unique terms encountered. 
" + "Consider restricting the documents queried or adding [{}] in the {} configuration", diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregator.java index 1d90f879526e2..02386acbd6134 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregator.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ml.aggs.correlation; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -45,7 +44,7 @@ public InternalAggregation doReduce(Aggregations aggregations, AggregationReduce ) .orElse(null); if (bucketPathValue == null) { - throw new AggregationExecutionException( + throw new IllegalArgumentException( "unable to find valid bucket values in path [" + bucketsPaths()[0] + "] for agg [" + name() + "]" ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunction.java index 8908fe303aa01..87cfd4bae7015 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/CountCorrelationFunction.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.MovingFunctions; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -99,7 +98,7 @@ public boolean equals(Object obj) { @Override public double execute(CountCorrelationIndicator y) { if (indicator.getExpectations().length != y.getExpectations().length) { - throw new AggregationExecutionException( + throw new IllegalArgumentException( "value lengths do not match; indicator.expectations [" + indicator.getExpectations().length + "] and number of buckets [" @@ -136,7 +135,7 @@ public double execute(CountCorrelationIndicator y) { } final double weight = MovingFunctions.sum(y.getExpectations()) / indicator.getDocCount(); if (weight > 1.0) { - throw new AggregationExecutionException( + throw new IllegalArgumentException( "doc_count of indicator must be larger than the total count of the correlating values indicator count [" + indicator.getDocCount() + "] correlating value total count [" diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java index 402f9d2eb9d22..5cb9bf543fd19 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java @@ -10,7 +10,6 @@ import org.apache.commons.math3.util.FastMath; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.bucket.terms.heuristic.NXYSignificanceHeuristic; import org.elasticsearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -175,9 +174,7 @@ public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long || docsContainTermInClass > Long.MAX_VALUE || allDocsNotInClass > Long.MAX_VALUE || docsContainTermNotInClass > Long.MAX_VALUE) { - throw new AggregationExecutionException( - "too many documents in background and foreground sets, further restrict sets for execution" - ); + throw new IllegalArgumentException("too many documents in background and foreground sets, further restrict sets for execution"); } double v1 = new LongBinomialDistribution((long) allDocsInClass, docsContainTermInClass / allDocsInClass).logProbability( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregator.java index c94eacad6fb86..ea01f07146ea6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregator.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ml.aggs.inference; import org.elasticsearch.inference.InferenceResults; -import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -132,7 +131,7 @@ public static Object resolveBucketValue( return bucket.getProperty(agg.getName(), aggPathsList); } - private static AggregationExecutionException invalidAggTypeError(String aggPath, Object propertyValue) { + private static IllegalArgumentException invalidAggTypeError(String aggPath, Object propertyValue) { String msg = AbstractPipelineAggregationBuilder.BUCKETS_PATH_FIELD.getPreferredName() + " must reference either a number value, a single value numeric metric aggregation or a string: got [" @@ -143,6 +142,6 @@ private static AggregationExecutionException invalidAggTypeError(String aggPath, + "] at aggregation [" + aggPath + "]"; - return new AggregationExecutionException(msg); + return new IllegalArgumentException(msg); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregator.java index c23335a121e70..518b76aae3732 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregator.java @@ -237,7 +237,7 @@ public InternalAggregation doReduce(Aggregations aggregations, AggregationReduce } ); if (maybeBucketsValue.isPresent() == false) { - throw new AggregationExecutionException( + throw new IllegalArgumentException( "unable to find valid bucket values in bucket path [" + bucketsPaths()[0] + "] for agg [" + name() + "]" ); } From 
6cde0df4636ac4385f46a8a9d6971310aaf2a8fd Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Fri, 6 Oct 2023 15:10:45 +0100
Subject: [PATCH 029/176] [ML] More checks and tests for parsing Inference processor config (#100335)

Following on from #100205 this PR adds more tests and checks for corner
cases when parsing the configuration.

---
 .../ingest/processors/inference.asciidoc      |   3 +-
 .../ClassificationInferenceResultsTests.java  |   9 +-
 .../results/ErrorInferenceResultsTests.java   |   4 +-
 .../results/InferenceResultsTestCase.java     |  52 +++++++-
 .../ml/inference/results/NerResultsTests.java |   9 +-
 ...lpClassificationInferenceResultsTests.java |   9 +-
 .../PyTorchPassThroughResultsTests.java       |   7 +-
 ...uestionAnsweringInferenceResultsTests.java |   9 +-
 .../RegressionInferenceResultsTests.java      |   7 +-
 .../results/TextEmbeddingResultsTests.java    |   8 +-
 .../results/TextExpansionResultsTests.java    |   7 +-
 .../TextSimilarityInferenceResultsTests.java  |   9 +-
 .../results/WarningInferenceResultsTests.java |   4 +-
 .../inference/ingest/InferenceProcessor.java  |  99 +++++++++++---
 .../InferenceProcessorFactoryTests.java       | 123 ++++++++++++++++--
 .../ingest/InferenceProcessorTests.java       | 111 +++++++++++++++-
 16 files changed, 401 insertions(+), 69 deletions(-)

diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc
index f0c029d99e14a..75b667e634cdb 100644
--- a/docs/reference/ingest/processors/inference.asciidoc
+++ b/docs/reference/ingest/processors/inference.asciidoc
@@ -17,10 +17,11 @@ ingested in the pipeline.
 |======
 | Name | Required | Default | Description
 | `model_id` . | yes | - | (String) The ID or alias for the trained model, or the ID of the deployment.
-| `input_output` | no | (List) Input fields for inference and output (destination) fields for the inference results. This options is incompatible with the `target_field` and `field_map` options.
+| `input_output` | no | - | (List) Input fields for inference and output (destination) fields for the inference results. This option is incompatible with the `target_field` and `field_map` options.
 | `target_field` | no | `ml.inference.<processor_tag>` | (String) Field added to incoming documents to contain results objects.
 | `field_map` | no | If defined the model's default field map | (Object) Maps the document field names to the known field names of the model. This mapping takes precedence over any default mappings provided in the model configuration.
 | `inference_config` | no | The default settings defined in the model | (Object) Contains the inference type and its options.
+| `ignore_missing` | no | `false` | (Boolean) If `true` and any of the input fields defined in `input_output` are missing then those missing fields are quietly ignored, otherwise a missing field causes a failure. Only applies when using `input_output` configurations to explicitly list the input fields. 
include::common-options.asciidoc[] |====== diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResultsTests.java index 28e318c0dab48..a937fef23e4bc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResultsTests.java @@ -209,8 +209,13 @@ public void testToXContent() throws IOException { } @Override - void assertFieldValues(ClassificationInferenceResults createdInstance, IngestDocument document, String resultsField) { - String path = resultsField + "." + createdInstance.getResultsField(); + void assertFieldValues( + ClassificationInferenceResults createdInstance, + IngestDocument document, + String parentField, + String resultsField + ) { + String path = parentField + resultsField; switch (createdInstance.getPredictionFieldType()) { case NUMBER -> assertThat(document.getFieldValue(path, Double.class), equalTo(createdInstance.predictedValue())); case STRING -> assertThat(document.getFieldValue(path, String.class), equalTo(createdInstance.predictedValue())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResultsTests.java index e25b2da55b15b..20b2b4737c8b5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/ErrorInferenceResultsTests.java @@ -34,7 +34,7 @@ protected ErrorInferenceResults mutateInstance(ErrorInferenceResults instance) t } @Override - void assertFieldValues(ErrorInferenceResults createdInstance, IngestDocument document, String resultsField) { - assertThat(document.getFieldValue(resultsField + ".error", String.class), equalTo(createdInstance.getException().getMessage())); + void assertFieldValues(ErrorInferenceResults createdInstance, IngestDocument document, String parentField, String resultsField) { + assertThat(document.getFieldValue(parentField + "error", String.class), equalTo(createdInstance.getException().getMessage())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/InferenceResultsTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/InferenceResultsTestCase.java index 27503547e5705..bda9eed40659c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/InferenceResultsTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/InferenceResultsTestCase.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + abstract class InferenceResultsTestCase extends AbstractWireSerializingTestCase { public void testWriteToIngestDoc() throws IOException { @@ -34,11 +36,57 @@ public void testWriteToIngestDoc() throws IOException { document.setFieldValue(parentField, Map.of()); } InferenceResults.writeResult(inferenceResult, document, parentField, modelId); - assertFieldValues(inferenceResult, document, alreadyHasResult ? 
parentField + ".1" : parentField); + + String expectedOutputPath = alreadyHasResult ? parentField + ".1." : parentField + "."; + + assertThat( + document.getFieldValue(expectedOutputPath + InferenceResults.MODEL_ID_RESULTS_FIELD, String.class), + equalTo(modelId) + ); + if (inferenceResult instanceof NlpInferenceResults nlpInferenceResults && nlpInferenceResults.isTruncated()) { + assertTrue(document.getFieldValue(expectedOutputPath + "is_truncated", Boolean.class)); + } + + assertFieldValues(inferenceResult, document, expectedOutputPath, inferenceResult.getResultsField()); + } + } + + private void testWriteToIngestDocField() throws IOException { + for (int i = 0; i < NUMBER_OF_TEST_RUNS; ++i) { + T inferenceResult = createTestInstance(); + if (randomBoolean()) { + inferenceResult = copyInstance(inferenceResult, TransportVersion.current()); + } + IngestDocument document = TestIngestDocument.emptyIngestDocument(); + String outputField = randomAlphaOfLength(10); + String modelId = randomAlphaOfLength(10); + String parentField = randomBoolean() ? null : randomAlphaOfLength(10); + boolean writeModelId = randomBoolean(); + + boolean alreadyHasResult = randomBoolean(); + if (alreadyHasResult && parentField != null) { + document.setFieldValue(parentField, Map.of()); + } + InferenceResults.writeResultToField(inferenceResult, document, parentField, outputField, modelId, writeModelId); + + String expectedOutputPath = parentField == null ? "" : parentField + "."; + if (alreadyHasResult && parentField != null) { + expectedOutputPath = expectedOutputPath + "1."; + } + + if (writeModelId) { + String modelIdPath = expectedOutputPath + InferenceResults.MODEL_ID_RESULTS_FIELD; + assertThat(document.getFieldValue(modelIdPath, String.class), equalTo(modelId)); + } + if (inferenceResult instanceof NlpInferenceResults nlpInferenceResults && nlpInferenceResults.isTruncated()) { + assertTrue(document.getFieldValue(expectedOutputPath + "is_truncated", Boolean.class)); + } + + assertFieldValues(inferenceResult, document, expectedOutputPath, outputField); } } - abstract void assertFieldValues(T createdInstance, IngestDocument document, String resultsField); + abstract void assertFieldValues(T createdInstance, IngestDocument document, String parentField, String resultsField); public void testWriteToDocAndSerialize() throws IOException { for (int i = 0; i < NUMBER_OF_TEST_RUNS; ++i) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NerResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NerResultsTests.java index 68c69ff67fa48..4be49807d27b0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NerResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NerResultsTests.java @@ -92,15 +92,12 @@ public void testAsMap() { @Override @SuppressWarnings("unchecked") - void assertFieldValues(NerResults createdInstance, IngestDocument document, String resultsField) { - assertThat( - document.getFieldValue(resultsField + "." 
+ createdInstance.getResultsField(), String.class), - equalTo(createdInstance.getAnnotatedResult()) - ); + void assertFieldValues(NerResults createdInstance, IngestDocument document, String parentField, String resultsField) { + assertThat(document.getFieldValue(parentField + resultsField, String.class), equalTo(createdInstance.getAnnotatedResult())); if (createdInstance.getEntityGroups().size() > 0) { List> resultList = (List>) document.getFieldValue( - resultsField + "." + ENTITY_FIELD, + parentField + ENTITY_FIELD, List.class ); assertThat(resultList.size(), equalTo(createdInstance.getEntityGroups().size())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResultsTests.java index ac3cb638d88d1..f05b8ac3d8eab 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResultsTests.java @@ -79,8 +79,13 @@ protected Writeable.Reader instanceReader() { } @Override - void assertFieldValues(NlpClassificationInferenceResults createdInstance, IngestDocument document, String resultsField) { - String path = resultsField + "." + createdInstance.getResultsField(); + void assertFieldValues( + NlpClassificationInferenceResults createdInstance, + IngestDocument document, + String parentField, + String resultsField + ) { + String path = parentField + resultsField; assertThat(document.getFieldValue(path, String.class), equalTo(createdInstance.predictedValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/PyTorchPassThroughResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/PyTorchPassThroughResultsTests.java index c2386010a8f67..e6b38a08a75ba 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/PyTorchPassThroughResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/PyTorchPassThroughResultsTests.java @@ -58,10 +58,7 @@ public void testAsMap() { } @Override - void assertFieldValues(PyTorchPassThroughResults createdInstance, IngestDocument document, String resultsField) { - assertArrayEquals( - createdInstance.getInference(), - document.getFieldValue(resultsField + "." 
+ createdInstance.getResultsField(), double[][].class) - ); + void assertFieldValues(PyTorchPassThroughResults createdInstance, IngestDocument document, String parentField, String resultsField) { + assertArrayEquals(createdInstance.getInference(), document.getFieldValue(parentField + resultsField, double[][].class)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResultsTests.java index c9c65ea3f3538..29e7a5627cdd3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResultsTests.java @@ -83,8 +83,13 @@ protected Writeable.Reader instanceReader() { } @Override - void assertFieldValues(QuestionAnsweringInferenceResults createdInstance, IngestDocument document, String resultsField) { - String path = resultsField + "." + createdInstance.getResultsField(); + void assertFieldValues( + QuestionAnsweringInferenceResults createdInstance, + IngestDocument document, + String parentField, + String resultsField + ) { + String path = parentField + resultsField; assertThat(document.getFieldValue(path, String.class), equalTo(createdInstance.predictedValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResultsTests.java index 27a07e8f996f7..9eef7a42da9a8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResultsTests.java @@ -95,10 +95,7 @@ public void testToXContent() { } @Override - void assertFieldValues(RegressionInferenceResults createdInstance, IngestDocument document, String resultsField) { - assertThat( - document.getFieldValue(resultsField + "." + createdInstance.getResultsField(), Double.class), - closeTo(createdInstance.value(), 1e-10) - ); + void assertFieldValues(RegressionInferenceResults createdInstance, IngestDocument document, String parentField, String resultsField) { + assertThat(document.getFieldValue(parentField + resultsField, Double.class), closeTo(createdInstance.value(), 1e-10)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextEmbeddingResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextEmbeddingResultsTests.java index d29e79698e2c9..fd3ac7f8c0d12 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextEmbeddingResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextEmbeddingResultsTests.java @@ -55,11 +55,7 @@ public void testAsMap() { } @Override - void assertFieldValues(TextEmbeddingResults createdInstance, IngestDocument document, String resultsField) { - assertArrayEquals( - document.getFieldValue(resultsField + "." 
+ createdInstance.getResultsField(), double[].class), - createdInstance.getInference(), - 1e-10 - ); + void assertFieldValues(TextEmbeddingResults createdInstance, IngestDocument document, String parentField, String resultsField) { + assertArrayEquals(document.getFieldValue(parentField + resultsField, double[].class), createdInstance.getInference(), 1e-10); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResultsTests.java index c3b2fbf6fb556..82487960dfe8f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResultsTests.java @@ -47,11 +47,8 @@ protected TextExpansionResults mutateInstance(TextExpansionResults instance) { @Override @SuppressWarnings("unchecked") - void assertFieldValues(TextExpansionResults createdInstance, IngestDocument document, String resultsField) { - var ingestedTokens = (Map) document.getFieldValue( - resultsField + '.' + createdInstance.getResultsField(), - Map.class - ); + void assertFieldValues(TextExpansionResults createdInstance, IngestDocument document, String parentField, String resultsField) { + var ingestedTokens = (Map) document.getFieldValue(parentField + resultsField, Map.class); var tokenMap = createdInstance.getWeightedTokens() .stream() .collect(Collectors.toMap(TextExpansionResults.WeightedToken::token, TextExpansionResults.WeightedToken::weight)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java index e543e04a01085..b72f89bf0ae97 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResultsTests.java @@ -33,8 +33,13 @@ protected Writeable.Reader instanceReader() { } @Override - void assertFieldValues(TextSimilarityInferenceResults createdInstance, IngestDocument document, String resultsField) { - String path = resultsField + "." 
+ createdInstance.getResultsField(); + void assertFieldValues( + TextSimilarityInferenceResults createdInstance, + IngestDocument document, + String parentField, + String resultsField + ) { + String path = parentField + resultsField; assertThat(document.getFieldValue(path, Double.class), equalTo(createdInstance.predictedValue())); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/WarningInferenceResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/WarningInferenceResultsTests.java index 68379f888d11b..594fffc0c91f4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/WarningInferenceResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/results/WarningInferenceResultsTests.java @@ -33,7 +33,7 @@ protected Writeable.Reader instanceReader() { } @Override - void assertFieldValues(WarningInferenceResults createdInstance, IngestDocument document, String resultsField) { - assertThat(document.getFieldValue(resultsField + ".warning", String.class), equalTo(createdInstance.getWarning())); + void assertFieldValues(WarningInferenceResults createdInstance, IngestDocument document, String parentField, String resultsField) { + assertThat(document.getFieldValue(parentField + "warning", String.class), equalTo(createdInstance.getWarning())); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java index ef78078d1bbcd..905317713263e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessor.java @@ -87,6 +87,7 @@ public class InferenceProcessor extends AbstractProcessor { public static final String TYPE = "inference"; public static final String MODEL_ID = "model_id"; public static final String INFERENCE_CONFIG = "inference_config"; + public static final String IGNORE_MISSING = "ignore_missing"; // target field style mappings public static final String TARGET_FIELD = "target_field"; @@ -106,9 +107,10 @@ public static InferenceProcessor fromInputFieldConfiguration( String description, String modelId, InferenceConfigUpdate inferenceConfig, - List inputs + List inputs, + boolean ignoreMissing ) { - return new InferenceProcessor(client, auditor, tag, description, null, modelId, inferenceConfig, null, inputs, true); + return new InferenceProcessor(client, auditor, tag, description, null, modelId, inferenceConfig, null, inputs, true, ignoreMissing); } public static InferenceProcessor fromTargetFieldConfiguration( @@ -121,7 +123,20 @@ public static InferenceProcessor fromTargetFieldConfiguration( InferenceConfigUpdate inferenceConfig, Map fieldMap ) { - return new InferenceProcessor(client, auditor, tag, description, targetField, modelId, inferenceConfig, fieldMap, null, false); + // ignore_missing only applies to when using the input_field config + return new InferenceProcessor( + client, + auditor, + tag, + description, + targetField, + modelId, + inferenceConfig, + fieldMap, + null, + false, + false + ); } private final Client client; @@ -134,6 +149,7 @@ public static InferenceProcessor fromTargetFieldConfiguration( private final AtomicBoolean shouldAudit = new AtomicBoolean(true); private final List inputs; private final 
boolean configuredWithInputsFields; + private final boolean ignoreMissing; private InferenceProcessor( Client client, @@ -145,7 +161,8 @@ private InferenceProcessor( InferenceConfigUpdate inferenceConfig, Map fieldMap, List inputs, - boolean configuredWithInputsFields + boolean configuredWithInputsFields, + boolean ignoreMissing ) { super(tag, description); this.configuredWithInputsFields = configuredWithInputsFields; @@ -153,6 +170,7 @@ private InferenceProcessor( this.auditor = ExceptionsHelper.requireNonNull(auditor, "auditor"); this.modelId = ExceptionsHelper.requireNonNull(modelId, MODEL_ID); this.inferenceConfig = ExceptionsHelper.requireNonNull(inferenceConfig, INFERENCE_CONFIG); + this.ignoreMissing = ignoreMissing; if (configuredWithInputsFields) { this.inputs = ExceptionsHelper.requireNonNull(inputs, INPUT_OUTPUT); @@ -205,23 +223,36 @@ void handleResponse(InferModelAction.Response response, IngestDocument ingestDoc } InferModelAction.Request buildRequest(IngestDocument ingestDocument) { - Map fields = new HashMap<>(ingestDocument.getSourceAndMetadata()); - // Add ingestMetadata as previous processors might have added metadata from which we are predicting (see: foreach processor) - if (ingestDocument.getIngestMetadata().isEmpty() == false) { - fields.put(INGEST_KEY, ingestDocument.getIngestMetadata()); - } - if (configuredWithInputsFields) { + // ignore missing only applies when using an input field list List requestInputs = new ArrayList<>(); for (var inputFields : inputs) { - var lookup = (String) fields.get(inputFields.inputField); - if (lookup == null) { - lookup = ""; // need to send a non-null request to the same number of results back + try { + var inputText = ingestDocument.getFieldValue(inputFields.inputField, String.class, ignoreMissing); + // field is missing and ignoreMissing == true then a null value is returned. 
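+                    // (a missing field with ignoreMissing == false, or a field of the wrong type, throws
+                    // IllegalArgumentException, which the catch block below translates or rethrows)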
+ if (inputText == null) { + inputText = ""; // need to send a non-null request to the same number of results back + } + requestInputs.add(inputText); + } catch (IllegalArgumentException e) { + if (ingestDocument.hasField(inputFields.inputField())) { + // field is present but of the wrong type, translate to a more meaningful message + throw new IllegalArgumentException( + "input field [" + inputFields.inputField + "] cannot be processed because it is not a text field" + ); + } else { + throw e; + } } - requestInputs.add(lookup); } return InferModelAction.Request.forTextInput(modelId, inferenceConfig, requestInputs); } else { + Map fields = new HashMap<>(ingestDocument.getSourceAndMetadata()); + // Add ingestMetadata as previous processors might have added metadata from which we are predicting (see: foreach processor) + if (ingestDocument.getIngestMetadata().isEmpty() == false) { + fields.put(INGEST_KEY, ingestDocument.getIngestMetadata()); + } + LocalModel.mapFieldsIfNecessary(fields, fieldMap); return InferModelAction.Request.forIngestDocs(modelId, List.of(fields), inferenceConfig, previouslyLicensed); } @@ -373,11 +404,13 @@ public InferenceProcessor create( inferenceConfigUpdate = inferenceConfigUpdateFromMap(inferenceConfigMap); } - List> inputs = ConfigurationUtils.readOptionalList(TYPE, tag, config, INPUT_OUTPUT); + List> inputs = readOptionalInputOutPutConfig(config, tag); boolean configuredWithInputFields = inputs != null; if (configuredWithInputFields) { // new style input/output configuration var parsedInputs = parseInputFields(tag, inputs); + // ignore missing only applies to input field config + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, tag, config, IGNORE_MISSING, false); // validate incompatible settings are not present String targetField = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, TARGET_FIELD); @@ -414,7 +447,16 @@ public InferenceProcessor create( ); } - return fromInputFieldConfiguration(client, auditor, tag, description, modelId, inferenceConfigUpdate, parsedInputs); + return fromInputFieldConfiguration( + client, + auditor, + tag, + description, + modelId, + inferenceConfigUpdate, + parsedInputs, + ignoreMissing + ); } else { // old style configuration with target field String defaultTargetField = tag == null ? DEFAULT_TARGET_FIELD : DEFAULT_TARGET_FIELD + "." 
+ tag; @@ -553,7 +595,7 @@ void checkSupportedVersion(InferenceConfig config) { List parseInputFields(String tag, List> inputs) { if (inputs.isEmpty()) { - throw newConfigurationException(TYPE, tag, INPUT_OUTPUT, "cannot be empty at least one is required"); + throw newConfigurationException(TYPE, tag, INPUT_OUTPUT, "property cannot be empty at least one is required"); } var inputNames = new HashSet(); var outputNames = new HashSet(); @@ -582,6 +624,29 @@ List parseInputFields(String tag, List> inputs) return parsedInputs; } + @SuppressWarnings("unchecked") + List> readOptionalInputOutPutConfig(Map config, String tag) { + Object inputOutputs = config.remove(INPUT_OUTPUT); + if (inputOutputs == null) { + return null; + } + + // input_output may be a single map or a list of maps + if (inputOutputs instanceof List inputOutputList) { + if (inputOutputList.isEmpty() == false) { + // check it is a list of maps + if (inputOutputList.get(0) instanceof Map == false) { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, INPUT_OUTPUT, "property isn't a list of maps"); + } + } + return (List>) inputOutputList; + } else if (inputOutputs instanceof Map) { + return List.of((Map) inputOutputs); + } else { + throw ConfigurationUtils.newConfigurationException(TYPE, tag, INPUT_OUTPUT, "property isn't a map or list of maps"); + } + } + private ElasticsearchException duplicatedFieldNameError(String property, String fieldName, String tag) { return newConfigurationException(TYPE, tag, property, "names must be unique but [" + fieldName + "] is repeated"); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java index 07d27a1b1bbe8..a8d3af2efe7cd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java @@ -647,10 +647,16 @@ public void testCreateProcessorWithInputFields() { randomBoolean() ); - Map inputMap = new HashMap<>() { + Map inputMap1 = new HashMap<>() { { - put(InferenceProcessor.INPUT_FIELD, "in"); - put(InferenceProcessor.OUTPUT_FIELD, "out"); + put(InferenceProcessor.INPUT_FIELD, "in1"); + put(InferenceProcessor.OUTPUT_FIELD, "out1"); + } + }; + Map inputMap2 = new HashMap<>() { + { + put(InferenceProcessor.INPUT_FIELD, "in2"); + put(InferenceProcessor.OUTPUT_FIELD, "out2"); } }; @@ -671,8 +677,7 @@ public void testCreateProcessorWithInputFields() { Map config = new HashMap<>() { { put(InferenceProcessor.MODEL_ID, "my_model"); - put(InferenceProcessor.INPUT_OUTPUT, List.of(inputMap)); - put(InferenceProcessor.INFERENCE_CONFIG, Collections.singletonMap(inferenceConfigType, Collections.emptyMap())); + put(InferenceProcessor.INPUT_OUTPUT, List.of(inputMap1, inputMap2)); } }; // create valid inference configs with required fields @@ -693,13 +698,115 @@ public void testCreateProcessorWithInputFields() { assertTrue(inferenceProcessor.isConfiguredWithInputsFields()); var inputs = inferenceProcessor.getInputs(); - assertThat(inputs, hasSize(1)); - assertEquals(inputs.get(0), new InferenceProcessor.Factory.InputConfig("in", null, "out", Map.of())); + assertThat(inputs, hasSize(2)); + assertEquals(inputs.get(0), new InferenceProcessor.Factory.InputConfig("in1", null, "out1", Map.of())); + assertEquals(inputs.get(1), new 
InferenceProcessor.Factory.InputConfig("in2", null, "out2", Map.of())); assertNull(inferenceProcessor.getFieldMap()); assertNull(inferenceProcessor.getTargetField()); } + public void testCreateProcessorWithInputFieldSingleOrList() { + InferenceProcessor.Factory processorFactory = new InferenceProcessor.Factory( + client, + clusterService, + Settings.EMPTY, + randomBoolean() + ); + + for (var isList : new boolean[] { true, false }) { + Map inputMap = new HashMap<>() { + { + put(InferenceProcessor.INPUT_FIELD, "in"); + put(InferenceProcessor.OUTPUT_FIELD, "out"); + } + }; + + Map config = new HashMap<>(); + config.put(InferenceProcessor.MODEL_ID, "my_model"); + if (isList) { + config.put(InferenceProcessor.INPUT_OUTPUT, List.of(inputMap)); + } else { + config.put(InferenceProcessor.INPUT_OUTPUT, inputMap); + } + + if (randomBoolean()) { + config.put( + InferenceProcessor.INFERENCE_CONFIG, + Collections.singletonMap(TextExpansionConfigUpdate.NAME, Collections.emptyMap()) + ); + } + + var inferenceProcessor = processorFactory.create(Collections.emptyMap(), "processor_with_single_input", null, config); + assertEquals("my_model", inferenceProcessor.getModelId()); + assertTrue(inferenceProcessor.isConfiguredWithInputsFields()); + + var inputs = inferenceProcessor.getInputs(); + assertThat(inputs, hasSize(1)); + assertEquals(inputs.get(0), new InferenceProcessor.Factory.InputConfig("in", null, "out", Map.of())); + + assertNull(inferenceProcessor.getFieldMap()); + assertNull(inferenceProcessor.getTargetField()); + } + } + + public void testCreateProcessorWithInputFieldWrongType() { + InferenceProcessor.Factory processorFactory = new InferenceProcessor.Factory( + client, + clusterService, + Settings.EMPTY, + randomBoolean() + ); + + { + Map config = new HashMap<>(); + config.put(InferenceProcessor.MODEL_ID, "my_model"); + config.put(InferenceProcessor.INPUT_OUTPUT, List.of(1, 2, 3)); + + var e = expectThrows( + ElasticsearchParseException.class, + () -> processorFactory.create(Collections.emptyMap(), "processor_with_bad_config", null, config) + ); + assertThat(e.getMessage(), containsString("[input_output] property isn't a list of maps")); + } + { + Map config = new HashMap<>(); + config.put(InferenceProcessor.MODEL_ID, "my_model"); + config.put(InferenceProcessor.INPUT_OUTPUT, Boolean.TRUE); + + var e = expectThrows( + ElasticsearchParseException.class, + () -> processorFactory.create(Collections.emptyMap(), "processor_with_bad_config", null, config) + ); + assertThat(e.getMessage(), containsString("[input_output] property isn't a map or list of maps")); + } + { + Map badMap = new HashMap<>(); + badMap.put(Boolean.TRUE, "foo"); + Map config = new HashMap<>(); + config.put(InferenceProcessor.MODEL_ID, "my_model"); + config.put(InferenceProcessor.INPUT_OUTPUT, badMap); + + var e = expectThrows( + ElasticsearchParseException.class, + () -> processorFactory.create(Collections.emptyMap(), "processor_with_bad_config", null, config) + ); + assertThat(e.getMessage(), containsString("[input_field] required property is missing")); + } + { + // empty list + Map config = new HashMap<>(); + config.put(InferenceProcessor.MODEL_ID, "my_model"); + config.put(InferenceProcessor.INPUT_OUTPUT, List.of()); + + var e = expectThrows( + ElasticsearchParseException.class, + () -> processorFactory.create(Collections.emptyMap(), "processor_with_bad_config", null, config) + ); + assertThat(e.getMessage(), containsString("[input_output] property cannot be empty at least one is required")); + } + } + public void 
testParsingInputFields() { InferenceProcessor.Factory processorFactory = new InferenceProcessor.Factory( client, @@ -785,7 +892,7 @@ public void testParsingInputFieldsGivenNoInputs() { ); var e = expectThrows(ElasticsearchParseException.class, () -> processorFactory.parseInputFields("my_processor", List.of())); - assertThat(e.getMessage(), containsString("[input_output] cannot be empty at least one is required")); + assertThat(e.getMessage(), containsString("[input_output] property cannot be empty at least one is required")); } private static ClusterState buildClusterStateWithModelReferences(String... modelId) throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java index f85b5e687ac3d..a68084aa6eb28 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.PredictionFieldType; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.RegressionConfigUpdate; @@ -34,6 +35,7 @@ import java.util.Map; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -506,7 +508,8 @@ public void testMutateDocumentWithInputFields() { "description", modelId, new RegressionConfigUpdate("foo", null), - inputs + inputs, + randomBoolean() ); IngestDocument document = TestIngestDocument.emptyIngestDocument(); @@ -535,7 +538,8 @@ public void testMutateDocumentWithInputFieldsNested() { "description", modelId, new RegressionConfigUpdate("foo", null), - inputs + inputs, + randomBoolean() ); IngestDocument document = TestIngestDocument.emptyIngestDocument(); @@ -545,6 +549,8 @@ public void testMutateDocumentWithInputFieldsNested() { InferModelAction.Response response = new InferModelAction.Response(List.of(teResult1, teResult2), modelId, true); inferenceProcessor.mutateDocument(response, document); + assertEquals(modelId, document.getFieldValue("ml.results.model_id", String.class)); + var bodyTokens = document.getFieldValue("ml.results.body_tokens", HashMap.class); assertEquals(teResult1.getWeightedTokens().size(), bodyTokens.entrySet().size()); if (teResult1.getWeightedTokens().isEmpty() == false) { @@ -564,4 +570,105 @@ public void testMutateDocumentWithInputFieldsNested() { ); } } + + public void testBuildRequestWithInputFields() { + String modelId = "elser"; + List inputs = new ArrayList<>(); + inputs.add(new InferenceProcessor.Factory.InputConfig("body.text", "ml.results", "body_tokens", Map.of())); + inputs.add(new InferenceProcessor.Factory.InputConfig("title.text", "ml.results", "title_tokens", Map.of())); + + InferenceProcessor inferenceProcessor = 
InferenceProcessor.fromInputFieldConfiguration( + client, + auditor, + "my_processor_tag", + "description", + modelId, + new EmptyConfigUpdate(), + inputs, + randomBoolean() + ); + + IngestDocument document = TestIngestDocument.emptyIngestDocument(); + document.setFieldValue("body.text", "body_text"); + document.setFieldValue("title.text", "title_text"); + document.setFieldValue("unrelated", "text"); + + var request = inferenceProcessor.buildRequest(document); + assertTrue(request.getObjectsToInfer().isEmpty()); + var requestInputs = request.getTextInput(); + assertThat(requestInputs, contains("body_text", "title_text")); + } + + public void testBuildRequestWithInputFields_WrongType() { + String modelId = "elser"; + List inputs = new ArrayList<>(); + inputs.add(new InferenceProcessor.Factory.InputConfig("not_a_string", "ml.results", "tokens", Map.of())); + + InferenceProcessor inferenceProcessor = InferenceProcessor.fromInputFieldConfiguration( + client, + auditor, + "my_processor_tag", + "description", + modelId, + new EmptyConfigUpdate(), + inputs, + randomBoolean() + ); + + IngestDocument document = TestIngestDocument.emptyIngestDocument(); + document.setFieldValue("not_a_string", Boolean.TRUE); + document.setFieldValue("unrelated", "text"); + + var e = expectThrows(IllegalArgumentException.class, () -> inferenceProcessor.buildRequest(document)); + assertThat(e.getMessage(), containsString("input field [not_a_string] cannot be processed because it is not a text field")); + } + + public void testBuildRequestWithInputFields_MissingField() { + String modelId = "elser"; + List inputs = new ArrayList<>(); + inputs.add(new InferenceProcessor.Factory.InputConfig("body.text", "ml.results", "body_tokens", Map.of())); + inputs.add(new InferenceProcessor.Factory.InputConfig("title.text", "ml.results", "title_tokens", Map.of())); + + { + InferenceProcessor inferenceProcessor = InferenceProcessor.fromInputFieldConfiguration( + client, + auditor, + "my_processor_tag", + "description", + modelId, + new EmptyConfigUpdate(), + inputs, + false + ); + + IngestDocument document = TestIngestDocument.emptyIngestDocument(); + document.setFieldValue("body.text", "body_text"); + document.setFieldValue("unrelated", "text"); + + var e = expectThrows(IllegalArgumentException.class, () -> inferenceProcessor.buildRequest(document)); + assertThat(e.getMessage(), containsString("field [title] not present as part of path [title.text]")); + } + + // same test with ignore_missing == true + { + InferenceProcessor inferenceProcessor = InferenceProcessor.fromInputFieldConfiguration( + client, + auditor, + "my_processor_tag", + "description", + modelId, + new EmptyConfigUpdate(), + inputs, + true + ); + + IngestDocument document = TestIngestDocument.emptyIngestDocument(); + document.setFieldValue("body.text", "body_text"); + document.setFieldValue("unrelated", 1.0); + + var request = inferenceProcessor.buildRequest(document); + var requestInputs = request.getTextInput(); + assertThat(requestInputs, contains("body_text", "")); + } + } } From 69d87e1bfdfd5b3d5cb2b3eefb882c247009407e Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 6 Oct 2023 17:04:46 +0200 Subject: [PATCH 030/176] Rework beats artifacts resolution in docker build (#100157) * Rework metricbeat artifacts resolution to avoid misleading error message * Pass File object to beats repo uri to workaround parsing problems --- distribution/docker/build.gradle | 44 ++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git 
a/distribution/docker/build.gradle b/distribution/docker/build.gradle index c9d8531db9ed0..7b7040dfd7098 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -38,30 +38,36 @@ repositories { } } -if(useDra == false) { +if (useDra == false) { repositories { - // Cloud builds bundle some beats - ivy { - name = 'beats' - if (useLocalArtifacts) { - url "file://${buildDir}/artifacts/" - patternLayout { - artifact '/[organisation]/[module]-[revision]-[classifier].[ext]' - } - } else { - url "https://artifacts-snapshot.elastic.co/" - patternLayout { - if (VersionProperties.isElasticsearchSnapshot()) { - artifact '/[organization]/[revision]/downloads/[organization]/[module]/[module]-[revision]-[classifier].[ext]' + exclusiveContent { + // Cloud builds bundle some beats + forRepository { + ivy { + name = 'beats' + if (useLocalArtifacts) { + url getLayout().getBuildDirectory().dir("artifacts").get().asFile + patternLayout { + artifact '/[organisation]/[module]-[revision]-[classifier].[ext]' + } } else { - // When building locally we always use snapshot artifacts even if passing `-Dbuild.snapshot=false`. - // Release builds are always done with a local repo. - artifact '/[organization]/[revision]-SNAPSHOT/downloads/[organization]/[module]/[module]-[revision]-SNAPSHOT-[classifier].[ext]' + url "https://artifacts-snapshot.elastic.co/" + patternLayout { + if (VersionProperties.isElasticsearchSnapshot()) { + artifact '/[organization]/[revision]/downloads/[organization]/[module]/[module]-[revision]-[classifier].[ext]' + } else { + // When building locally we always use snapshot artifacts even if passing `-Dbuild.snapshot=false`. + // Release builds are always done with a local repo. + artifact '/[organization]/[revision]-SNAPSHOT/downloads/[organization]/[module]/[module]-[revision]-SNAPSHOT-[classifier].[ext]' + } + } } + metadataSources { artifact() } } } - metadataSources { artifact() } - content { includeGroup 'beats' } + filter { + includeGroup("beats") + } } } } From 09d599fdacdca90b58f2b9e42530279a69c97beb Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 6 Oct 2023 16:28:25 +0100 Subject: [PATCH 031/176] Improve RepositoryData BwC (#100401) - Reinstates the few working parts of MultiVersionRepositoryAccessIT - Fixes the version bound for compatibility with v8.9.x - Removes an assertion that blocks fixing BwC properly in 8.11.0 Relates #98454 --- docs/changelog/100401.yaml | 5 +++++ .../MultiVersionRepositoryAccessIT.java | 20 +++++++++++++++++-- .../repositories/RepositoryData.java | 15 ++++++++++---- 3 files changed, 34 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/100401.yaml diff --git a/docs/changelog/100401.yaml b/docs/changelog/100401.yaml new file mode 100644 index 0000000000000..93528baa01dc3 --- /dev/null +++ b/docs/changelog/100401.yaml @@ -0,0 +1,5 @@ +pr: 100401 +summary: Improve `RepositoryData` BwC +area: Snapshot/Restore +type: bug +issues: [] diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index b4f96fb729814..db99087b07140 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -8,8 +8,8 @@ package org.elasticsearch.upgrades; -import org.apache.lucene.tests.util.LuceneTestCase; 
import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -46,7 +46,6 @@ * */ @SuppressWarnings("removal") -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/98454") public class MultiVersionRepositoryAccessIT extends ESRestTestCase { private enum TestStep { @@ -79,6 +78,8 @@ public static TestStep parse(String value) { private static final TestStep TEST_STEP = TestStep.parse(System.getProperty("tests.rest.suite")); + private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); + @Override protected boolean preserveSnapshotsUponCompletion() { return true; @@ -95,6 +96,11 @@ protected boolean preserveTemplatesUponCompletion() { } public void testCreateAndRestoreSnapshot() throws IOException { + assumeTrue( + "test does not work for downgrades before 8.10.0, see https://github.com/elastic/elasticsearch/issues/98454", + OLD_CLUSTER_VERSION.onOrAfter(Version.V_8_10_0) + ); + final String repoName = getTestName(); try { final int shards = 3; @@ -141,6 +147,11 @@ public void testCreateAndRestoreSnapshot() throws IOException { } public void testReadOnlyRepo() throws IOException { + assumeTrue( + "test does not fully work for downgrades before 8.10.0, see https://github.com/elastic/elasticsearch/issues/98454", + OLD_CLUSTER_VERSION.onOrAfter(Version.V_8_10_0) || TEST_STEP != TestStep.STEP3_OLD_CLUSTER + ); + final String repoName = getTestName(); final int shards = 3; final boolean readOnly = TEST_STEP.ordinal() > 1; // only restore from read-only repo in steps 3 and 4 @@ -174,6 +185,11 @@ public void testReadOnlyRepo() throws IOException { ); public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { + assumeTrue( + "test does not work for downgrades before 8.10.0, see https://github.com/elastic/elasticsearch/issues/98454", + OLD_CLUSTER_VERSION.onOrAfter(Version.V_8_10_0) + ); + final String repoName = getTestName(); try { final int shards = 3; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index f70676aa2a2fc..4f8cc64655c70 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -19,6 +19,8 @@ import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotState; @@ -740,7 +742,7 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final } final IndexVersion version = snapshotDetails.getVersion(); if (version != null) { - if (version.before(IndexVersion.V_8_9_0)) { + if (version.before(IndexVersion.V_8_10_0)) { builder.field(VERSION, Version.fromId(version.id()).toString()); } else { builder.field(VERSION, version.id()); @@ -953,14 +955,19 @@ private static void parseSnapshots( } } + private static final Logger logger = LogManager.getLogger(RepositoryData.class); + private static IndexVersion parseIndexVersion(XContentParser.Token token, 
XContentParser parser) throws IOException { if (token == XContentParser.Token.VALUE_NUMBER) { return IndexVersion.fromId(parser.intValue()); } else { XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, token, parser); - Version v = Version.fromString(parser.text()); - assert v.before(Version.V_8_10_0); - return IndexVersion.fromId(v.id); + final var versionStr = parser.text(); + final var versionId = Version.fromString(versionStr).id; + if (versionId > 8_11_00_99 && versionId < 8_500_000) { + logger.error("found impossible string index version [{}] with id [{}]", versionStr, versionId); + } + return IndexVersion.fromId(versionId); } } From f4d53bcc6102891374c670b61ed7d5b3ab5e47c3 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Fri, 6 Oct 2023 11:52:16 -0400 Subject: [PATCH 032/176] [buildkite] Fix backport PR pipeline generation (#100427) --- .buildkite/scripts/pull-request/pipeline.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.buildkite/scripts/pull-request/pipeline.ts b/.buildkite/scripts/pull-request/pipeline.ts index c4e12f2aa48fe..a6a3df6ff2aff 100644 --- a/.buildkite/scripts/pull-request/pipeline.ts +++ b/.buildkite/scripts/pull-request/pipeline.ts @@ -116,7 +116,10 @@ export const generatePipelines = ( .filter((x) => x); if (!changedFiles?.length) { - const mergeBase = execSync(`git merge-base ${process.env["GITHUB_PR_TARGET_BRANCH"]} HEAD`, { cwd: PROJECT_ROOT }) + const mergeBase = execSync( + `git fetch origin ${process.env["GITHUB_PR_TARGET_BRANCH"]}; git merge-base origin/${process.env["GITHUB_PR_TARGET_BRANCH"]} HEAD`, + { cwd: PROJECT_ROOT } + ) .toString() .trim(); From f0a63a2e7c4545c3bcdcacedb142b27041b062d0 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 6 Oct 2023 09:18:59 -0700 Subject: [PATCH 033/176] Mute SnapshotResiliencyTests --- .../org/elasticsearch/snapshots/SnapshotResiliencyTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 1b5ff3f39be22..a04273186dca7 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -227,6 +228,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100434") public class SnapshotResiliencyTests extends ESTestCase { private DeterministicTaskQueue deterministicTaskQueue; From 571d4a4d834e7465999cea1082c0f7fb1a8df9c5 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 6 Oct 2023 17:19:54 +0100 Subject: [PATCH 034/176] [ML] Only log long time unassigned message if not empty (#100417) Followup to #100154 The log message about unassigned jobs is simply spam if there are no unassigned jobs, so it should only be logged if there's at least one item in the list. 
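For illustration, the guard this patch adds boils down to the shape below; it reuses the names visible in the diff that follows (`findLongTimeUnassignedTasks` and `logger` belong to the surrounding notifier class, and the `List<String>` element type is an assumption here):

List<String> itemsToReport = findLongTimeUnassignedTasks(now, tasks);
if (itemsToReport.isEmpty() == false) { // only warn when there is actually something to report
    logger.warn("ML persistent tasks unassigned for a long time [{}]", String.join("|", itemsToReport));
}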
--- .../org/elasticsearch/xpack/ml/MlAssignmentNotifier.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index 2378f0becd959..409c1e5d24fcd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -267,8 +267,9 @@ private void logLongTimeUnassigned(Instant now, ClusterState state) { } List itemsToReport = findLongTimeUnassignedTasks(now, tasks); - - logger.warn("ML persistent tasks unassigned for a long time [{}]", String.join("|", itemsToReport)); + if (itemsToReport.isEmpty() == false) { + logger.warn("ML persistent tasks unassigned for a long time [{}]", String.join("|", itemsToReport)); + } } /** From 6f67d7c605edcaa68addb54a0eba2f34a7011540 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 6 Oct 2023 12:33:30 -0400 Subject: [PATCH 035/176] ESQL: Reenable yaml tests (#100406) This reenables the yaml tests we disabled while building block tracking. They all pass for me. I believe we've covered their cases. --- .../resources/rest-api-spec/test/10_basic.yml | 4 -- .../resources/rest-api-spec/test/20_aggs.yml | 4 -- .../rest-api-spec/test/50_index_patterns.yml | 59 +++++++++---------- 3 files changed, 29 insertions(+), 38 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index 0171e22304c64..fdd5cf2566961 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -300,10 +300,6 @@ setup: --- "Test Mixed Input Params": - - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/99826" - - do: esql.query: body: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/20_aggs.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/20_aggs.yml index febd10a0b2c9b..6e8c0eb120ddd 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/20_aggs.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/20_aggs.yml @@ -418,10 +418,6 @@ setup: --- "Test Eval With Null And Count": - - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/99826" - - do: esql.query: body: diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml index bae0e623d12a3..280a32aa10cd3 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml @@ -235,36 +235,35 @@ disjoint_mappings: - length: { values: 1 } - match: { values.0.0: 2 } -# AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -# - do: -# esql.query: -# body: -# query: 'from test1,test2 | sort message1, message2 | 
eval x = message1, y = message2 + 1 | keep message1, message2, x, y' -# - match: { columns.0.name: message1 } -# - match: { columns.0.type: keyword } -# - match: { columns.1.name: message2 } -# - match: { columns.1.type: long } -# - match: { columns.2.name: x } -# - match: { columns.2.type: keyword } -# - match: { columns.3.name: y } -# - match: { columns.3.type: long } -# - length: { values: 4 } -# - match: { values.0.0: foo1 } -# - match: { values.0.1: null } -# - match: { values.0.2: foo1 } -# - match: { values.0.3: null } -# - match: { values.1.0: foo2 } -# - match: { values.1.1: null } -# - match: { values.1.2: foo2 } -# - match: { values.1.3: null } -# - match: { values.2.0: null } -# - match: { values.2.1: 1 } -# - match: { values.2.2: null } -# - match: { values.2.3: 2 } -# - match: { values.3.0: null } -# - match: { values.3.1: 2 } -# - match: { values.3.2: null } -# - match: { values.3.3: 3 } + - do: + esql.query: + body: + query: 'from test1,test2 | sort message1, message2 | eval x = message1, y = message2 + 1 | keep message1, message2, x, y' + - match: { columns.0.name: message1 } + - match: { columns.0.type: keyword } + - match: { columns.1.name: message2 } + - match: { columns.1.type: long } + - match: { columns.2.name: x } + - match: { columns.2.type: keyword } + - match: { columns.3.name: y } + - match: { columns.3.type: long } + - length: { values: 4 } + - match: { values.0.0: foo1 } + - match: { values.0.1: null } + - match: { values.0.2: foo1 } + - match: { values.0.3: null } + - match: { values.1.0: foo2 } + - match: { values.1.1: null } + - match: { values.1.2: foo2 } + - match: { values.1.3: null } + - match: { values.2.0: null } + - match: { values.2.1: 1 } + - match: { values.2.2: null } + - match: { values.2.3: 2 } + - match: { values.3.0: null } + - match: { values.3.1: 2 } + - match: { values.3.2: null } + - match: { values.3.3: 3 } --- same_name_different_type: From 8b9d413ff5aa21d94107fd69462353abd36d3332 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 6 Oct 2023 12:34:28 -0400 Subject: [PATCH 036/176] ESQL: Reenable another csv test (#100411) This one was also disabled as part of our effort to do Block tracking and now it passes. I believe it was disabled when we didn't close Blocks on the way into `EVAL`. But now we do, so we're good to enable this one! --- .../esql/qa/testFixtures/src/main/resources/keep.csv-spec | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec index 13a8b8f66fc4f..3637081c3c4b6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec @@ -426,8 +426,7 @@ nullsum:integer | salary:integer null | 74999 ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/99826 -evalWithNullAndAvg-Ignore +evalWithNullAndAvg from employees | eval nullsum = salary + null | stats avg(nullsum), count(nullsum); avg(nullsum):double | count(nullsum):long From a708501fd501e7f765d721b43973c43e4ae06c71 Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Fri, 6 Oct 2023 13:37:55 -0400 Subject: [PATCH 037/176] Search of remote clusters with no shards results in successful status. (#100354) ccs_minimize_roundtrips=true was not checking if no shards were present to search before setting the cluster search status. If the number of shards is zero, then cluster search status should be 'successful'. 
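Hedging on names (the real change lives in TransportSearchAction's cluster-status update, shown further below), the intended decision is roughly the following sketch, where totalShards, failedShards and timedOut stand in for values taken from the per-cluster search response:

// Sketch only: a remote that resolved zero shards (e.g. a non-matching index
// pattern) has nothing to fail, so it must count as successful, not failed/skipped.
static SearchResponse.Cluster.Status statusFor(int totalShards, int failedShards, boolean skipUnavailable, boolean timedOut) {
    if (totalShards > 0 && failedShards >= totalShards) {
        return skipUnavailable ? SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED;
    }
    if (timedOut || failedShards > 0) {
        return SearchResponse.Cluster.Status.PARTIAL;
    }
    return SearchResponse.Cluster.Status.SUCCESSFUL;
}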
This aligns ccs_minimize_roundtrips=true with the false setting and how it worked in earlier versions. Fixes bug #100350 --- docs/changelog/100354.yaml | 5 + .../search/ccs/CrossClusterSearchIT.java | 60 +++++++++ .../action/search/TransportSearchAction.java | 9 +- .../search/CrossClusterAsyncSearchIT.java | 118 ++++++++++++++++++ 4 files changed, 188 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/100354.yaml diff --git a/docs/changelog/100354.yaml b/docs/changelog/100354.yaml new file mode 100644 index 0000000000000..d2e50e744eb88 --- /dev/null +++ b/docs/changelog/100354.yaml @@ -0,0 +1,5 @@ +pr: 100354 +summary: Search of remote clusters with no shards results in successful status +area: Search +type: bug +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 4267c596e9f07..0be427a5fd09d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -271,6 +271,66 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); } + // tests bug fix https://github.com/elastic/elasticsearch/issues/100350 + public void testClusterDetailsAfterCCSWhereRemoteClusterHasNoShardsToSearch() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + + SearchRequest searchRequest = new SearchRequest(localIndex, REMOTE_CLUSTER + ":" + "no_such_index*"); + if (randomBoolean()) { + searchRequest = searchRequest.scroll("1m"); + } + searchRequest.allowPartialSearchResults(false); + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + boolean minimizeRoundtrips = randomBoolean(); + searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); + + SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); + assertNotNull(searchResponse); + + SearchResponse.Clusters clusters = searchResponse.getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), 
equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo("no_such_index*")); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // no shards since index does not exist + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertNotNull(remoteClusterSearchInfo.getTook()); + } + public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws Exception { Map testClusterInfo = setupTwoClusters(); String localIndex = (String) testClusterInfo.get("local.index"); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index caebf6c81dbba..a2739e2c2a85e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -810,15 +810,16 @@ private static void ccsClusterInfoUpdate( ) { /* * Cluster Status logic: - * 1) FAILED if all shards failed and skip_unavailable=false - * 2) SKIPPED if all shards failed and skip_unavailable=true + * 1) FAILED if total_shards > 0 && all shards failed && skip_unavailable=false + * 2) SKIPPED if total_shards > 0 && all shards failed && skip_unavailable=true * 3) PARTIAL if it timed out * 4) PARTIAL if it at least one of the shards succeeded but not all * 5) SUCCESSFUL if no shards failed (and did not time out) */ clusters.swapCluster(clusterAlias, (k, v) -> { SearchResponse.Cluster.Status status; - if (searchResponse.getFailedShards() >= searchResponse.getTotalShards()) { + int totalShards = searchResponse.getTotalShards(); + if (totalShards > 0 && searchResponse.getFailedShards() >= totalShards) { if (skipUnavailable) { status = SearchResponse.Cluster.Status.SKIPPED; } else { @@ -832,7 +833,7 @@ private static void ccsClusterInfoUpdate( status = SearchResponse.Cluster.Status.SUCCESSFUL; } return new SearchResponse.Cluster.Builder(v).setStatus(status) - .setTotalShards(searchResponse.getTotalShards()) + .setTotalShards(totalShards) .setSuccessfulShards(searchResponse.getSuccessfulShards()) .setSkippedShards(searchResponse.getSkippedShards()) .setFailedShards(searchResponse.getFailedShards()) diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index f079828ccb5bf..803a45ad13b07 100644 --- 
a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -759,6 +759,124 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce } } + // tests bug fix https://github.com/elastic/elasticsearch/issues/100350 + public void testClusterDetailsAfterCCSWhereRemoteClusterHasNoShardsToSearch() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + + SearchListenerPlugin.blockQueryPhase(); + + // query against a missing index on the remote cluster + SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + "no_such_index*"); + request.setCcsMinimizeRoundtrips(randomBoolean()); + request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); + request.setKeepOnCompletion(true); + request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + + AsyncSearchResponse response = submitAsyncSearch(request); + assertNotNull(response.getSearchResponse()); + assertTrue(response.isRunning()); + + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(request.getSearchRequest()); + + assertNotNull(response.getSearchResponse()); + assertTrue(response.isRunning()); + { + SearchResponse.Clusters clusters = response.getSearchResponse().getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + } + + SearchListenerPlugin.waitSearchStarted(); + SearchListenerPlugin.allowQueryPhase(); + + waitForSearchTasksToFinish(); + + { + AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); + assertFalse(finishedResponse.isPartial()); + + SearchResponse.Clusters clusters = finishedResponse.getSearchResponse().getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), 
equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // will be zero since the index does not exist + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + + assertNotNull(remoteClusterSearchInfo.getTook()); + assertFalse(remoteClusterSearchInfo.isTimedOut()); + } + // check that the async_search/status response includes the same cluster details + { + AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); + assertFalse(statusResponse.isPartial()); + + SearchResponse.Clusters clusters = statusResponse.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.FAILED), equalTo(0)); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); + + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(0)); // will be zero since the index does not exist + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + + assertNotNull(remoteClusterSearchInfo.getTook()); + assertFalse(remoteClusterSearchInfo.isTimedOut()); + } + } + public void testCCSWithSearchTimeout() throws Exception { Map testClusterInfo = setupTwoClusters(); String localIndex = (String) 
testClusterInfo.get("local.index"); From 003912bd569bf9fec72db9bf9a58967e1716b8fb Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Fri, 6 Oct 2023 13:53:45 -0400 Subject: [PATCH 038/176] Add healthcheck for shibboleth-idp in idp-fixture (#100369) --- x-pack/test/idp-fixture/build.gradle | 1 + x-pack/test/idp-fixture/docker-compose.yml | 8 +++++++- x-pack/test/idp-fixture/idp/bin/run-jetty.sh | 17 ++++++++++++++++- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index 0f5363a278f60..b645afd4922ba 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -6,6 +6,7 @@ apply plugin: 'elasticsearch.test.fixtures' dockerCompose { composeAdditionalArgs = ['--compatibility'] + upAdditionalArgs = ["--wait"] } tasks.named("preProcessFixture").configure { diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml index 11a8ec7a7bb3d..e431fa4ede611 100644 --- a/x-pack/test/idp-fixture/docker-compose.yml +++ b/x-pack/test/idp-fixture/docker-compose.yml @@ -1,4 +1,4 @@ -version: '3.7' +version: "3.7" services: openldap: command: --copy-service --loglevel debug @@ -37,6 +37,12 @@ services: links: - openldap:openldap restart: always #ensure ephemeral port mappings are properly updated + healthcheck: + test: curl -f -s --http0.9 http://localhost:4443 --connect-timeout 10 --max-time 10 --output - > /dev/null + interval: 5s + timeout: 20s + retries: 60 + start_period: 10s oidc-provider: build: diff --git a/x-pack/test/idp-fixture/idp/bin/run-jetty.sh b/x-pack/test/idp-fixture/idp/bin/run-jetty.sh index af795963b9712..24ece94c2715d 100644 --- a/x-pack/test/idp-fixture/idp/bin/run-jetty.sh +++ b/x-pack/test/idp-fixture/idp/bin/run-jetty.sh @@ -10,4 +10,19 @@ fi export JETTY_ARGS="jetty.sslContext.keyStorePassword=$JETTY_BROWSER_SSL_KEYSTORE_PASSWORD jetty.backchannel.sslContext.keyStorePassword=$JETTY_BACKCHANNEL_SSL_KEYSTORE_PASSWORD" sed -i "s/^-Xmx.*$/-Xmx$JETTY_MAX_HEAP/g" /opt/shib-jetty-base/start.ini -exec /opt/jetty-home/bin/jetty.sh run +# For some reason, this container always immediately (in less than 1 second) exits with code 0 when starting for the first time +# Even with a health check, docker-compose will immediately report the container as unhealthy when using --wait instead of waiting for it to become healthy +# So, let's just start it a second time if it exits quickly +set +e +start_time=$(date +%s) +/opt/jetty-home/bin/jetty.sh run +exit_code=$? +end_time=$(date +%s) + +duration=$((end_time - start_time)) +if [ $duration -lt 5 ]; then + /opt/jetty-home/bin/jetty.sh run + exit_code=$? +fi + +exit $exit_code From 0ba8a14ca322f9665acb18d24be24b9c163ba0ac Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 6 Oct 2023 11:02:37 -0700 Subject: [PATCH 039/176] Release page in FilterOperator when all values null (#100440) We should release the input Page when we discard it in the FilterOperator. 
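As a sketch of the ownership rule this fix enforces (only Page#releaseBlocks is taken from the diff below; the other names are illustrative): an operator that drops an input page without emitting anything still owns that page and must release it, otherwise the block accounting leaks.

protected Page process(Page page) {
    if (selectsNothing(page)) {    // hypothetical predicate for this sketch
        page.releaseBlocks();      // we are discarding the input, so release it here
        return null;               // nothing to emit downstream
    }
    return filter(page);           // otherwise ownership of the returned page passes onward
}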
--- .../java/org/elasticsearch/compute/operator/FilterOperator.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java index db8e0c4708e2c..1770af60f2252 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java @@ -44,6 +44,7 @@ protected Page process(Page page) { try (Block.Ref ref = evaluator.eval(page)) { if (ref.block().areAllValuesNull()) { // All results are null which is like false. No values selected. + page.releaseBlocks(); return null; } BooleanBlock test = (BooleanBlock) ref.block(); From f0f86a1866da46a9e417fc98bf43884a23f5da9a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 6 Oct 2023 14:07:21 -0400 Subject: [PATCH 040/176] ESQL: Reenable another block tracking test (#100424) This reenables another test we disabled while working on block tracking; it works now. --- .../elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java index e6dc165a75509..9b44a7eaf8e2f 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java @@ -66,11 +66,7 @@ public void testSortByManyLongsSuccess() throws IOException { /** * This used to crash the node with an out of memory, but now it just trips a circuit breaker. - *
<p>
- * AwaitsFix because we don't properly clear the breaker. Cranky should help here. - *
<p>
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") public void testSortByManyLongsTooMuchMemory() throws IOException { initManyLongs(); assertCircuitBreaks(() -> sortByManyLongs(5000)); From 7ab9c671e1264f38c06972c4212a65a05884f69c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 6 Oct 2023 11:25:08 -0700 Subject: [PATCH 041/176] Ensure JULBridgeTests resets root logging level after test (#100441) --- .../elasticsearch/common/logging/JULBridgeTests.java | 11 +++++++---- .../snapshots/SnapshotResiliencyTests.java | 2 -- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java b/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java index 950859f3b1193..5f86c6ba559ae 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/JULBridgeTests.java @@ -21,9 +21,9 @@ import org.junit.BeforeClass; import java.util.logging.ConsoleHandler; +import java.util.logging.Handler; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.instanceOf; @@ -31,7 +31,7 @@ public class JULBridgeTests extends ESTestCase { private static final java.util.logging.Logger logger = java.util.logging.Logger.getLogger(""); private static java.util.logging.Level savedLevel; - private static java.util.logging.Handler[] savedHandlers; + private static Handler[] savedHandlers; @BeforeClass public static void saveLoggerState() { @@ -60,10 +60,12 @@ public static void restoreLoggerState() { private void assertLogged(Runnable loggingCode, LoggingExpectation... expectations) { Logger testLogger = LogManager.getLogger(""); - Loggers.setLevel(testLogger, Level.ALL); + Level savedLevel = testLogger.getLevel(); MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); + try { + Loggers.setLevel(testLogger, Level.ALL); + mockAppender.start(); Loggers.addAppender(testLogger, mockAppender); for (var expectation : expectations) { mockAppender.addExpectation(expectation); @@ -71,6 +73,7 @@ private void assertLogged(Runnable loggingCode, LoggingExpectation... 
expectatio loggingCode.run(); mockAppender.assertAllExpectationsMatched(); } finally { + Loggers.setLevel(testLogger, savedLevel); Loggers.removeAppender(testLogger, mockAppender); mockAppender.stop(); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index a04273186dca7..1b5ff3f39be22 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -228,7 +227,6 @@ import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100434") public class SnapshotResiliencyTests extends ESTestCase { private DeterministicTaskQueue deterministicTaskQueue; From 87b18152854c0cc8baa0e14b8f0d1f4a8f282948 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 6 Oct 2023 19:27:16 +0100 Subject: [PATCH 042/176] Avoid resetting feature states before 7.13.0 (#100423) This API was introduced in 7.13.0, we cannot call it on earlier nodes. --- .../java/org/elasticsearch/test/rest/ESRestTestCase.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 9a3c03810c943..4e769881c39a0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -561,8 +561,14 @@ protected boolean preserveTemplatesUponCompletion() { */ protected boolean resetFeatureStates() { try { + final Version minimumNodeVersion = minimumNodeVersion(); + // Reset feature state API was introduced in 7.13.0 + if (minimumNodeVersion.before(Version.V_7_13_0)) { + return false; + } + // ML reset fails when ML is disabled in versions before 8.7 - if (isMlEnabled() == false && minimumNodeVersion().before(Version.V_8_7_0)) { + if (isMlEnabled() == false && minimumNodeVersion.before(Version.V_8_7_0)) { return false; } } catch (IOException e) { From 0770e3def89986f788e13aaf3bf1a823ea302b3b Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Fri, 6 Oct 2023 14:31:32 -0400 Subject: [PATCH 043/176] Revert "Add healthcheck for shibboleth-idp in idp-fixture (#100369)" This reverts commit 003912bd569bf9fec72db9bf9a58967e1716b8fb. 
--- x-pack/test/idp-fixture/build.gradle | 1 - x-pack/test/idp-fixture/docker-compose.yml | 8 +------- x-pack/test/idp-fixture/idp/bin/run-jetty.sh | 17 +---------------- 3 files changed, 2 insertions(+), 24 deletions(-) diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index b645afd4922ba..0f5363a278f60 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -6,7 +6,6 @@ apply plugin: 'elasticsearch.test.fixtures' dockerCompose { composeAdditionalArgs = ['--compatibility'] - upAdditionalArgs = ["--wait"] } tasks.named("preProcessFixture").configure { diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml index e431fa4ede611..11a8ec7a7bb3d 100644 --- a/x-pack/test/idp-fixture/docker-compose.yml +++ b/x-pack/test/idp-fixture/docker-compose.yml @@ -1,4 +1,4 @@ -version: "3.7" +version: '3.7' services: openldap: command: --copy-service --loglevel debug @@ -37,12 +37,6 @@ services: links: - openldap:openldap restart: always #ensure ephemeral port mappings are properly updated - healthcheck: - test: curl -f -s --http0.9 http://localhost:4443 --connect-timeout 10 --max-time 10 --output - > /dev/null - interval: 5s - timeout: 20s - retries: 60 - start_period: 10s oidc-provider: build: diff --git a/x-pack/test/idp-fixture/idp/bin/run-jetty.sh b/x-pack/test/idp-fixture/idp/bin/run-jetty.sh index 24ece94c2715d..af795963b9712 100644 --- a/x-pack/test/idp-fixture/idp/bin/run-jetty.sh +++ b/x-pack/test/idp-fixture/idp/bin/run-jetty.sh @@ -10,19 +10,4 @@ fi export JETTY_ARGS="jetty.sslContext.keyStorePassword=$JETTY_BROWSER_SSL_KEYSTORE_PASSWORD jetty.backchannel.sslContext.keyStorePassword=$JETTY_BACKCHANNEL_SSL_KEYSTORE_PASSWORD" sed -i "s/^-Xmx.*$/-Xmx$JETTY_MAX_HEAP/g" /opt/shib-jetty-base/start.ini -# For some reason, this container always immediately (in less than 1 second) exits with code 0 when starting for the first time -# Even with a health check, docker-compose will immediately report the container as unhealthy when using --wait instead of waiting for it to become healthy -# So, let's just start it a second time if it exits quickly -set +e -start_time=$(date +%s) -/opt/jetty-home/bin/jetty.sh run -exit_code=$? -end_time=$(date +%s) - -duration=$((end_time - start_time)) -if [ $duration -lt 5 ]; then - /opt/jetty-home/bin/jetty.sh run - exit_code=$? 
-fi - -exit $exit_code +exec /opt/jetty-home/bin/jetty.sh run From 8d654d0cfd5a2f1389786d94accdef3f276e6db2 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 6 Oct 2023 21:49:10 +0300 Subject: [PATCH 044/176] ESQL: Add identity check in Block equality (#100377) Fix #100374 --- docs/changelog/100377.yaml | 6 ++++++ .../org/elasticsearch/compute/data/BooleanBlock.java | 3 +++ .../org/elasticsearch/compute/data/BytesRefBlock.java | 3 +++ .../org/elasticsearch/compute/data/DoubleBlock.java | 3 +++ .../org/elasticsearch/compute/data/IntBlock.java | 3 +++ .../org/elasticsearch/compute/data/LongBlock.java | 3 +++ .../main/java/org/elasticsearch/compute/data/DocBlock.java | 2 +- .../java/org/elasticsearch/compute/data/X-Block.java.st | 3 +++ 8 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/100377.yaml diff --git a/docs/changelog/100377.yaml b/docs/changelog/100377.yaml new file mode 100644 index 0000000000000..a4cbb0ba46a61 --- /dev/null +++ b/docs/changelog/100377.yaml @@ -0,0 +1,6 @@ +pr: 100377 +summary: "ESQL: Add identity check in Block equality" +area: ES|QL +type: bug +issues: + - 100374 diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index 5b58e7bcf5c30..632ebdeaa2882 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -112,6 +112,9 @@ default void writeTo(StreamOutput out) throws IOException { * equals method works properly across different implementations of the BooleanBlock interface. */ static boolean equals(BooleanBlock block1, BooleanBlock block2) { + if (block1 == block2) { + return true; + } final int positions = block1.getPositionCount(); if (positions != block2.getPositionCount()) { return false; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 9c48ac61d5a1b..64ec7347ebeb6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -116,6 +116,9 @@ default void writeTo(StreamOutput out) throws IOException { * equals method works properly across different implementations of the BytesRefBlock interface. */ static boolean equals(BytesRefBlock block1, BytesRefBlock block2) { + if (block1 == block2) { + return true; + } final int positions = block1.getPositionCount(); if (positions != block2.getPositionCount()) { return false; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index a3dba750556ab..7e8c47263630b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -112,6 +112,9 @@ default void writeTo(StreamOutput out) throws IOException { * equals method works properly across different implementations of the DoubleBlock interface. 
*/ static boolean equals(DoubleBlock block1, DoubleBlock block2) { + if (block1 == block2) { + return true; + } final int positions = block1.getPositionCount(); if (positions != block2.getPositionCount()) { return false; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index d343428aab2bc..32b7024963e87 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -112,6 +112,9 @@ default void writeTo(StreamOutput out) throws IOException { * equals method works properly across different implementations of the IntBlock interface. */ static boolean equals(IntBlock block1, IntBlock block2) { + if (block1 == block2) { + return true; + } final int positions = block1.getPositionCount(); if (positions != block2.getPositionCount()) { return false; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 9ff3a5ba116a4..dd3a9d79fbaf7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -112,6 +112,9 @@ default void writeTo(StreamOutput out) throws IOException { * equals method works properly across different implementations of the LongBlock interface. */ static boolean equals(LongBlock block1, LongBlock block2) { + if (block1 == block2) { + return true; + } final int positions = block1.getPositionCount(); if (positions != block2.getPositionCount()) { return false; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index 6bcf913ce6240..ccd740bc91ba9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -62,7 +62,7 @@ public boolean equals(Object obj) { if (obj instanceof DocBlock == false) { return false; } - return vector.equals(((DocBlock) obj).vector); + return this == obj || vector.equals(((DocBlock) obj).vector); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index e8ccc83b51351..596f014eaa577 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -132,6 +132,9 @@ $endif$ * equals method works properly across different implementations of the $Type$Block interface. 
*/ static boolean equals($Type$Block block1, $Type$Block block2) { + if (block1 == block2) { + return true; + } final int positions = block1.getPositionCount(); if (positions != block2.getPositionCount()) { return false; From 44068cbdc25d657c319a987aee064a2492ea56d3 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 6 Oct 2023 21:50:54 +0300 Subject: [PATCH 045/176] ESQL: Page shouldn't close a block twice (#100370) Page now takes into account that a block can be used in multiple positions (such as the same column aliased under multiple names). Introduce a newPageAndRelease method that handles clean-up of blocks that are not used when creating a new page. Relates #100001 Fix #100365 Fix #100356 --- docs/changelog/100370.yaml | 7 +++ .../org/elasticsearch/compute/data/Page.java | 53 ++++++++++++++++--- .../compute/operator/ProjectOperator.java | 27 +--------- .../compute/data/BasicPageTests.java | 19 +++++++ .../operator/ProjectOperatorTests.java | 10 +--- .../src/main/resources/rename.csv-spec | 9 ++-- .../xpack/esql/action/EsqlActionIT.java | 2 - .../esql/planner/LocalExecutionPlanner.java | 4 +- 8 files changed, 80 insertions(+), 51 deletions(-) create mode 100644 docs/changelog/100370.yaml diff --git a/docs/changelog/100370.yaml b/docs/changelog/100370.yaml new file mode 100644 index 0000000000000..3e2e1b762c654 --- /dev/null +++ b/docs/changelog/100370.yaml @@ -0,0 +1,7 @@ +pr: 100370 +summary: "ESQL: Page shouldn't close a block twice" +area: ES|QL +type: bug +issues: + - 100356 + - 100365 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java index 0265013eb2029..451a0b540f308 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Page.java @@ -14,6 +14,8 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collections; +import java.util.IdentityHashMap; import java.util.Objects; /** @@ -88,9 +90,7 @@ private Page(Page prev, Block[] toAdd) { this.positionCount = prev.positionCount; this.blocks = Arrays.copyOf(prev.blocks, prev.blocks.length + toAdd.length); - for (int i = 0; i < toAdd.length; i++) { - this.blocks[prev.blocks.length + i] = toAdd[i]; - } + System.arraycopy(toAdd, 0, this.blocks, prev.blocks.length, toAdd.length); } public Page(StreamInput in) throws IOException { @@ -177,8 +177,8 @@ public Page appendPage(Page toAdd) { @Override public int hashCode() { int result = Objects.hash(positionCount); - for (int i = 0; i < blocks.length; i++) { - result = 31 * result + Objects.hashCode(blocks[i]); + for (Block block : blocks) { + result = 31 * result + Objects.hashCode(block); } return result; } @@ -229,7 +229,48 @@ public void writeTo(StreamOutput out) throws IOException { * Release all blocks in this page, decrementing any breakers accounting for these blocks. */ public void releaseBlocks() { + if (blocksReleased) { + return; + } + blocksReleased = true; - Releasables.closeExpectNoException(blocks); + + // blocks can be used as multiple columns + var map = new IdentityHashMap(mapSize(blocks.length)); + for (Block b : blocks) { + if (map.putIfAbsent(b, Boolean.TRUE) == null) { + Releasables.closeExpectNoException(b); + } + } + } + + /** + * Returns a Page from the given blocks and closes all blocks that are not included, from the current Page.
+ * That is, allows clean-up of the current page _after_ external manipulation of the blocks. + * The current page should no longer be used and be considered closed. + */ + public Page newPageAndRelease(Block... keep) { + if (blocksReleased) { + throw new IllegalStateException("can't create new page from already released page"); + } + + blocksReleased = true; + + var newPage = new Page(positionCount, keep); + var set = Collections.newSetFromMap(new IdentityHashMap(mapSize(keep.length))); + set.addAll(Arrays.asList(keep)); + + // close blocks that have been left out + for (Block b : blocks) { + if (set.contains(b) == false) { + Releasables.closeExpectNoException(b); + } + } + + return newPage; + } + + static int mapSize(int expectedSize) { + return expectedSize < 2 ? expectedSize + 1 : (int) (expectedSize / 0.75 + 1.0); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java index b4fb830aed641..6e52a5351de58 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ProjectOperator.java @@ -10,9 +10,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; -import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -70,30 +68,7 @@ protected Page process(Page page) { var block = page.getBlock(source); blocks[b++] = block; } - closeUnused(page, blocks); - return new Page(page.getPositionCount(), blocks); - } - - /** - * Close all {@link Block}s that are in {@code page} but are not in {@code blocks}. 
- */ - public static void closeUnused(Page page, Block[] blocks) { - List blocksToRelease = new ArrayList<>(); - - for (int i = 0; i < page.getBlockCount(); i++) { - boolean used = false; - var current = page.getBlock(i); - for (int j = 0; j < blocks.length; j++) { - if (current == blocks[j]) { - used = true; - break; - } - } - if (used == false) { - blocksToRelease.add(current); - } - } - Releasables.close(blocksToRelease); + return page.newPageAndRelease(blocks); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java index 25aa957e90cff..23a257e7afbbe 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java @@ -196,6 +196,25 @@ public void testSerializationListPages() throws IOException { } } + public void testPageMultiRelease() { + int positions = randomInt(1024); + var block = new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); + Page page = new Page(block); + page.releaseBlocks(); + assertThat(block.isReleased(), is(true)); + page.releaseBlocks(); + } + + public void testNewPageAndRelease() { + int positions = randomInt(1024); + var blockA = new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); + var blockB = new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); + Page page = new Page(blockA, blockB); + Page newPage = page.newPageAndRelease(blockA); + assertThat(blockA.isReleased(), is(false)); + assertThat(blockB.isReleased(), is(true)); + } + BytesRefArray bytesRefArrayOf(String... 
values) { var array = new BytesRefArray(values.length, bigArrays); Arrays.stream(values).map(BytesRef::new).forEach(array::append); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java index 59e85390fc522..1acdbc4895c94 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java @@ -15,14 +15,11 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Tuple; import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.util.Arrays; -import java.util.HashSet; import java.util.List; -import java.util.Set; import java.util.stream.LongStream; import static org.hamcrest.Matchers.equalTo; @@ -59,15 +56,12 @@ public void testProjection() { var out = projection.getOutput(); assertThat(randomProjection.size(), lessThanOrEqualTo(out.getBlockCount())); - Set blks = new HashSet<>(); for (int i = 0; i < out.getBlockCount(); i++) { var block = out.getBlock(i); - assertEquals(block, page.getBlock(randomProjection.get(i))); - blks.add(block); + assertEquals(blocks[randomProjection.get(i)], block); } - // close all blocks separately since the same block can be used by multiple columns (aliased) - Releasables.closeWhileHandlingException(blks.toArray(new Block[0])); + out.releaseBlocks(); } private List randomProjection(int size) { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec index 44cf92254298b..5e5c70e3cbba7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec @@ -84,8 +84,7 @@ x:integer | z:integer 4 | 8 ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/100356 -renameProjectEval-Ignore +renameProjectEval from employees | sort emp_no | eval y = languages | rename languages as x | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3; x:integer | y:integer | x2:integer | y2:integer @@ -94,8 +93,7 @@ x:integer | y:integer | x2:integer | y2:integer 4 | 4 | 5 | 6 ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/100356 -duplicateProjectEval-Ignore +duplicateProjectEval from employees | eval y = languages, x = languages | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3; x:integer | y:integer | x2:integer | y2:integer @@ -160,8 +158,7 @@ y:integer | x:date 10061 | 1985-09-17T00:00:00.000Z ; -# AwaitsFix https://github.com/elastic/elasticsearch/issues/100356 -renameIntertwinedWithSort-Ignore +renameIntertwinedWithSort FROM employees | eval x = salary | rename x as y | rename y as x | sort x | rename x as y | limit 10; avg_worked_seconds:l | birth_date:date | emp_no:i | first_name:s | gender:s | height:d | height.float:d | height.half_float:d | height.scaled_float:d| hire_date:date | is_rehired:bool | job_positions:s | languages:i | languages.byte:i | languages.long:l | languages.short:i | last_name:s | salary:i | salary_change:d | salary_change.int:i | salary_change.keyword:s | salary_change.long:l | still_hired:bool | y:i diff --git 
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 46d85746c3990..fd4fe13b9c1b1 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.action; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Build; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -67,7 +66,6 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100365") public class EsqlActionIT extends AbstractEsqlIntegTestCase { long epoch = System.currentTimeMillis(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index df7b921f6e585..1c26de4a599f5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -30,7 +30,6 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.Operator.OperatorFactory; import org.elasticsearch.compute.operator.OutputOperator.OutputOperatorFactory; -import org.elasticsearch.compute.operator.ProjectOperator; import org.elasticsearch.compute.operator.RowOperator.RowOperatorFactory; import org.elasticsearch.compute.operator.ShowOperator; import org.elasticsearch.compute.operator.SinkOperator; @@ -334,8 +333,7 @@ private static Function alignPageToAttributes(List attrs, for (int i = 0; i < blocks.length; i++) { blocks[i] = p.getBlock(mappedPosition[i]); } - ProjectOperator.closeUnused(p, blocks); - return new Page(blocks); + return p.newPageAndRelease(blocks); } : Function.identity(); return transformer; From c485264c61d794d76de9b6c30efacff03d6d65b3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 6 Oct 2023 21:20:49 +0100 Subject: [PATCH 046/176] Parallelise sub-requests in GET _cat/nodes (#99789) Similar to #93437 we can run all the sub-requests in parallel. 
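The shape of the change: instead of chaining the cluster-state, nodes-info and nodes-stats calls one after another, each response is written into its own holder and a shared ref-counting listener builds the table once, after the last sub-request completes. As a minimal JDK-only sketch of that fan-out/join pattern (illustrative only; FanOutSketch and its strings are hypothetical names, not the Elasticsearch listener API):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicReference;

    class FanOutSketch {
        public static void main(String[] args) {
            // One holder per sub-response, filled concurrently.
            var state = new AtomicReference<String>();
            var info = new AtomicReference<String>();
            var stats = new AtomicReference<String>();

            // allOf() plays the role of the ref-counting listener:
            // it completes only after every sub-request has completed.
            CompletableFuture.allOf(
                CompletableFuture.supplyAsync(() -> "cluster state").thenAccept(state::set),
                CompletableFuture.supplyAsync(() -> "nodes info").thenAccept(info::set),
                CompletableFuture.supplyAsync(() -> "nodes stats").thenAccept(stats::set)
            ).thenRun(
                // The response is assembled exactly once, from all three parts.
                () -> System.out.println(state.get() + " / " + info.get() + " / " + stats.get())
            ).join();
        }
    }

In the real handler below, the holders are the clusterStateRef/nodesInfoRef/nodesStatsRef references: each sub-request writes its result into a slot, the joined continuation reads all of them, and a failure of any sub-request is propagated to the shared listener.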
--- .../rest/action/cat/RestNodesAction.java | 84 ++++++++++--------- 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 1c4d5f7d8fdbd..e8395710ede03 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -50,13 +51,13 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestActionListener; import org.elasticsearch.rest.action.RestResponseListener; import org.elasticsearch.script.ScriptStats; import org.elasticsearch.search.suggest.completion.CompletionStats; import java.util.List; import java.util.Locale; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.rest.RestRequest.Method.GET; @@ -80,49 +81,54 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { + + final boolean fullId = request.paramAsBoolean("full_id", false); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); - final boolean fullId = request.paramAsBoolean("full_id", false); - final boolean includeUnloadedSegments = request.paramAsBoolean("include_unloaded_segments", false); - return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { - @Override - public void processResponse(final ClusterStateResponse clusterStateResponse) { - NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); - nodesInfoRequest.clear() - .addMetrics( - NodesInfoMetrics.Metric.JVM.metricName(), - NodesInfoMetrics.Metric.OS.metricName(), - NodesInfoMetrics.Metric.PROCESS.metricName(), - NodesInfoMetrics.Metric.HTTP.metricName() + + final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.clear() + .addMetrics( + NodesInfoMetrics.Metric.JVM.metricName(), + NodesInfoMetrics.Metric.OS.metricName(), + NodesInfoMetrics.Metric.PROCESS.metricName(), + NodesInfoMetrics.Metric.HTTP.metricName() + ); + + final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); + nodesStatsRequest.clear() + .indices(true) + .addMetrics( + NodesStatsRequest.Metric.JVM.metricName(), + NodesStatsRequest.Metric.OS.metricName(), + NodesStatsRequest.Metric.FS.metricName(), + NodesStatsRequest.Metric.PROCESS.metricName(), + NodesStatsRequest.Metric.SCRIPT.metricName() + ); + nodesStatsRequest.indices().includeUnloadedSegments(request.paramAsBoolean("include_unloaded_segments", false)); + + return channel -> { + final var clusterStateRef = new AtomicReference(); + final var nodesInfoRef = new AtomicReference(); + final var 
nodesStatsRef = new AtomicReference(); + + try (var listeners = new RefCountingListener(new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(Void ignored) throws Exception { + return RestTable.buildResponse( + buildTable(fullId, request, clusterStateRef.get(), nodesInfoRef.get(), nodesStatsRef.get()), + channel ); - client.admin().cluster().nodesInfo(nodesInfoRequest, new RestActionListener(channel) { - @Override - public void processResponse(final NodesInfoResponse nodesInfoResponse) { - NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(); - nodesStatsRequest.clear() - .indices(true) - .addMetrics( - NodesStatsRequest.Metric.JVM.metricName(), - NodesStatsRequest.Metric.OS.metricName(), - NodesStatsRequest.Metric.FS.metricName(), - NodesStatsRequest.Metric.PROCESS.metricName(), - NodesStatsRequest.Metric.SCRIPT.metricName() - ); - nodesStatsRequest.indices().includeUnloadedSegments(includeUnloadedSegments); - client.admin().cluster().nodesStats(nodesStatsRequest, new RestResponseListener(channel) { - @Override - public RestResponse buildResponse(NodesStatsResponse nodesStatsResponse) throws Exception { - return RestTable.buildResponse( - buildTable(fullId, request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse), - channel - ); - } - }); - } - }); + } + })) { + final var clusterAdminClient = client.admin().cluster(); + clusterAdminClient.state(clusterStateRequest, listeners.acquire(clusterStateRef::set)); + clusterAdminClient.nodesInfo(nodesInfoRequest, listeners.acquire(nodesInfoRef::set)); + clusterAdminClient.nodesStats(nodesStatsRequest, listeners.acquire(nodesStatsRef::set)); } - }); + }; } @Override From b7eafce32cb1e7dfdcb259aab89b886ff93a1c66 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 6 Oct 2023 23:37:07 +0200 Subject: [PATCH 047/176] Make some practically static methods static (#97565) Another round of automated fixes to this, marking things that can be made static as static. Saves some JIT cycles but also turns some lambdas from capturing to non-capturing and makes the "utilityness" of some classes visible. 
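To make the capturing-versus-non-capturing point concrete, a small sketch (hypothetical names, not code from this change): a method reference to an instance method has to capture the receiver, so each evaluation may allocate a fresh object, while a reference to a static method is stateless and the runtime can hand back one cached instance.

    import java.util.List;
    import java.util.function.Predicate;

    class StaticRefSketch {
        // Touches no instance state, so it can be (and now is) static.
        private static boolean isBlank(String s) {
            return s.isBlank();
        }

        static long countBlank(List<String> values) {
            // StaticRefSketch::isBlank captures nothing, so one Predicate
            // instance can be reused across calls; an instance reference
            // like this::isBlank would have to capture the receiver.
            Predicate<String> onlyBlank = StaticRefSketch::isBlank;
            return values.stream().filter(onlyBlank).count();
        }

        public static void main(String[] args) {
            System.out.println(countBlank(List.of("a", " ", "", "b"))); // prints 2
        }
    }

Marking such helpers static also documents at the call site that they read no per-instance state, which is what makes the "utilityness" of a class visible.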
--- .../org/elasticsearch/geoip/GeoIpCli.java | 10 +-- .../cli/keystore/AddFileKeyStoreCommand.java | 2 +- .../plugins/cli/InstallPluginAction.java | 27 ++++---- .../plugins/cli/ListPluginsCommand.java | 2 +- .../elasticsearch/server/cli/ServerCli.java | 4 +- .../windows/service/ProcrunCommand.java | 4 +- .../provider/EmbeddedImplClassLoader.java | 2 +- .../jdk/ModuleQualifiedExportsService.java | 2 +- .../org/elasticsearch/geometry/Polygon.java | 2 +- .../SimplificationErrorCalculator.java | 4 +- .../java/org/elasticsearch/grok/Grok.java | 2 +- .../elasticsearch/grok/GrokCaptureType.java | 2 +- .../plugin/scanner/NamedComponentScanner.java | 11 ++-- .../scanner/NamedComponentScannerTests.java | 10 ++- .../org/elasticsearch/tdigest/TDigest.java | 2 +- .../support/filtering/FilterPath.java | 4 +- .../metric/ArrayValuesSourceParser.java | 2 +- .../metric/MatrixStatsResults.java | 2 +- .../lifecycle/DataStreamLifecycleService.java | 2 +- .../DataStreamTimestampFieldMapperTests.java | 2 +- .../common/NetworkDirectionProcessor.java | 4 +- .../ingest/common/RerouteProcessor.java | 2 +- .../ingest/geoip/GeoIpDownloader.java | 2 +- .../ingest/geoip/HttpClient.java | 4 +- .../ingest/geoip/GeoIpDownloaderTests.java | 20 +++--- .../ingest/useragent/DeviceTypeParser.java | 2 +- .../ingest/useragent/UserAgentParser.java | 4 +- .../script/mustache/MustacheScriptEngine.java | 2 +- .../painless/ContextGeneratorCommon.java | 2 +- .../painless/PainlessScriptEngine.java | 12 ++-- .../painless/ScriptClassInfo.java | 2 +- .../elasticsearch/painless/antlr/Walker.java | 2 +- .../lookup/PainlessLookupBuilder.java | 12 ++-- ...faultConstantFoldingOptimizationPhase.java | 2 +- .../phase/DefaultSemanticAnalysisPhase.java | 2 +- .../phase/DefaultUserTreeToIRTreePhase.java | 2 +- .../phase/PainlessSemanticAnalysisPhase.java | 2 +- .../phase/PainlessUserTreeToIRTreePhase.java | 6 +- .../elasticsearch/legacygeo/GeoShapeType.java | 2 +- .../legacygeo/builders/PolygonBuilder.java | 2 +- .../legacygeo/builders/ShapeBuilder.java | 2 +- .../mapper/LegacyGeoShapeFieldMapper.java | 2 +- .../extras/MatchOnlyTextFieldMapperTests.java | 2 +- .../SearchAsYouTypeFieldMapperTests.java | 3 +- .../extras/TokenCountFieldMapperTests.java | 2 +- .../ParentChildInnerHitContextBuilder.java | 2 +- .../percolator/PercolatorFieldMapper.java | 2 +- .../PercolatorFieldMapperTests.java | 4 +- .../AbstractBaseReindexRestHandler.java | 2 +- .../AbstractBulkByQueryRestHandler.java | 2 +- ...kIndexByScrollResponseContentListener.java | 2 +- .../repositories/azure/AzureBlobStore.java | 14 ++--- .../repositories/s3/S3BlobStore.java | 2 +- .../blobstore/url/http/URLHttpClient.java | 6 +- .../analysis/icu/IcuTokenizerFactory.java | 4 +- .../analysis/phonetic/KoelnerPhonetik.java | 4 +- .../discovery/ec2/Ec2NameResolver.java | 2 +- .../ConcurrentSeqNoVersioningIT.java | 8 +-- ...TransportPrevalidateNodeRemovalAction.java | 2 +- .../reroute/ClusterRerouteResponse.java | 2 +- ...ReservedComposableIndexTemplateAction.java | 2 +- .../action/ingest/ReservedPipelineAction.java | 2 +- ...ElasticsearchUncaughtExceptionHandler.java | 2 +- .../CoordinationDiagnosticsService.java | 4 +- .../StableMasterHealthIndicatorService.java | 2 +- .../MetadataIndexTemplateService.java | 2 +- .../SystemIndexMetadataUpgradeService.java | 4 +- .../allocation/DiskThresholdSettings.java | 6 +- ...rdsAvailabilityHealthIndicatorService.java | 4 +- .../DesiredBalanceShardsAllocator.java | 6 +- .../common/cli/EnvironmentAwareCommand.java | 2 +- .../common/logging/JULBridge.java | 2 +- 
.../common/logging/internal/LoggerImpl.java | 2 +- .../common/settings/SettingsModule.java | 2 +- .../elasticsearch/health/HealthService.java | 6 +- .../health/metadata/HealthMetadata.java | 4 +- .../node/DiskHealthIndicatorService.java | 4 +- .../health/node/LocalHealthMonitor.java | 2 +- .../action/TransportHealthNodeAction.java | 2 +- .../selection/HealthNodeTaskExecutor.java | 2 +- .../index/CompositeIndexEventListener.java | 2 +- .../index/codec/PerFieldMapperCodec.java | 4 +- .../codec/tsdb/ES87TSDBDocValuesEncoder.java | 6 +- .../codec/tsdb/ES87TSDBDocValuesProducer.java | 8 +-- .../index/engine/CombinedDeletionPolicy.java | 2 +- .../index/mapper/GeoPointFieldMapper.java | 2 +- .../mapper/IgnoreMalformedStoredValues.java | 4 +- .../index/mapper/TextFieldMapper.java | 2 +- .../FlattenedFieldSyntheticWriterHelper.java | 2 +- .../vectors/DenseVectorFieldMapper.java | 12 ++-- .../query/AbstractGeometryQueryBuilder.java | 2 +- .../analysis/wrappers/StableApiWrappers.java | 2 +- .../elasticsearch/ingest/IngestService.java | 2 +- .../java/org/elasticsearch/node/Node.java | 4 +- .../plugins/PluginIntrospector.java | 2 +- .../scanners/NamedComponentReader.java | 2 +- .../action/ReservedClusterSettingsAction.java | 2 +- .../service/FileSettingsService.java | 2 +- .../service/ReservedClusterStateService.java | 2 +- .../cluster/RestClusterGetSettingsAction.java | 2 +- .../field/vectors/ByteBinaryDenseVector.java | 2 +- .../field/vectors/ByteKnnDenseVector.java | 2 +- .../AbstractInternalHDRPercentiles.java | 2 +- .../AbstractInternalTDigestPercentiles.java | 2 +- .../elasticsearch/search/dfs/DfsPhase.java | 2 +- .../lookup/StoredFieldSourceProvider.java | 2 +- .../vectors/KnnScoreDocQueryBuilder.java | 2 +- .../transport/ProxyConnectionStrategy.java | 2 +- .../transport/RemoteConnectionStrategy.java | 2 +- .../upgrades/MigrationResultsUpdateTask.java | 2 +- .../LinearizabilityCheckerTests.java | 38 ++++++------ .../MetadataIndexTemplateServiceTests.java | 8 +-- ...ystemIndexMetadataUpgradeServiceTests.java | 16 ++--- .../index/mapper/DynamicMappingTests.java | 2 +- .../mapper/TimeSeriesIdFieldMapperTests.java | 5 +- .../recovery/RecoveriesCollectionTests.java | 4 +- .../ReservedClusterStateServiceTests.java | 2 +- .../support/CoreValuesSourceTypeTests.java | 2 +- .../gcs/GoogleCloudStorageHttpHandler.java | 2 +- .../java/fixture/s3/S3HttpFixtureWithEC2.java | 2 +- .../test/disruption/NetworkDisruptionIT.java | 4 +- .../AbstractCoordinatorTestCase.java | 3 +- .../coordination/LinearizabilityChecker.java | 8 +-- .../metadata/DataStreamTestHelper.java | 4 +- .../common/inject/ModuleTestCase.java | 4 +- .../index/engine/TranslogHandler.java | 2 +- .../index/mapper/MapperServiceTestCase.java | 43 +++++++------ .../index/mapper/MapperTestCase.java | 2 +- .../ESIndexLevelReplicationTestCase.java | 4 +- .../AbstractIndexRecoveryIntegTestCase.java | 2 +- .../AbstractThirdPartyRepositoryTestCase.java | 2 +- ...ESMockAPIBasedRepositoryIntegTestCase.java | 2 +- .../script/MockScriptEngine.java | 2 +- .../aggregations/AggregatorTestCase.java | 10 +-- .../geogrid/GeoGridAggregatorTestCase.java | 4 +- .../search/geo/BaseShapeIntegTestCase.java | 2 +- .../search/geo/GeoShapeIntegTestCase.java | 2 +- .../elasticsearch/test/ESIntegTestCase.java | 4 +- .../test/InternalAggregationTestCase.java | 2 +- .../test/disruption/LongGCDisruption.java | 2 +- .../junit/listeners/ReproduceInfoPrinter.java | 2 +- .../test/rest/ESRestTestCase.java | 12 ++-- .../elasticsearch/test/rest/ObjectPath.java | 4 +- 
.../test/store/MockFSDirectoryFactory.java | 4 +- .../test/transport/CapturingTransport.java | 2 +- .../AbstractSimpleTransportTestCase.java | 2 +- .../loggerusage/ESLoggerUsageChecker.java | 2 +- .../cluster/local/WaitForHttpResource.java | 4 +- .../test/cluster/util/ProcessReaper.java | 2 +- .../AbstractSchemaValidationTestCase.java | 2 +- .../rest/yaml/ClientYamlTestResponse.java | 2 +- .../ClientYamlSuiteRestApiParser.java | 2 +- .../test/rest/yaml/section/DoSection.java | 2 +- .../license/licensor/LicenseSigner.java | 2 +- ...mulativeCardinalityPipelineAggregator.java | 2 +- .../MovingPercentilesPipelineAggregator.java | 10 +-- .../multiterms/MultiTermsAggregator.java | 2 +- .../ttest/UnpairedTTestAggregator.java | 4 +- .../AutoscalingCalculateCapacityService.java | 6 +- .../nodeinfo/AutoscalingNodeInfoService.java | 6 +- .../FrozenExistenceDeciderService.java | 6 +- .../ReactiveStorageDeciderService.java | 13 +++- .../blobcache/common/SparseFileTracker.java | 2 +- .../license/ClusterStateLicenseService.java | 4 +- .../license/StartBasicClusterTask.java | 2 +- .../sourceonly/SourceOnlySnapshot.java | 4 +- .../mapper/DataTierFieldMapper.java | 2 +- .../ExplainDataFrameAnalyticsAction.java | 4 +- .../action/PutDataFrameAnalyticsAction.java | 4 +- .../core/ml/datafeed/DatafeedConfig.java | 2 +- .../core/ml/datafeed/DatafeedUpdate.java | 2 +- .../evaluation/common/AbstractAucRoc.java | 2 +- .../inference/EnsembleInferenceModel.java | 2 +- .../inference/trainedmodel/tree/TreeNode.java | 2 +- .../xpack/core/ml/job/config/Detector.java | 2 +- .../core/ml/job/results/AnomalyRecord.java | 2 +- .../xpack/core/scheduler/Cron.java | 10 +-- .../apikey/InvalidateApiKeyRequest.java | 2 +- .../PutPrivilegesRequestBuilder.java | 5 +- .../expressiondsl/ExpressionParser.java | 17 ++--- .../DocumentSubsetBitsetCache.java | 2 +- .../authz/permission/IndicesPermission.java | 2 +- .../authz/store/ReservedRolesStore.java | 4 +- .../core/template/IndexTemplateRegistry.java | 2 +- .../action/TransportTermsEnumAction.java | 8 +-- .../transforms/TransformConfigUpdate.java | 2 +- .../transforms/TransformIndexerStats.java | 2 +- .../pivot/DateHistogramGroupSource.java | 4 +- .../core/watcher/actions/ActionWrapper.java | 2 +- .../core/watcher/crypto/CryptoService.java | 2 +- .../authz/store/ReservedRolesStoreTests.java | 62 +++++++++---------- .../watcher/crypto/CryptoServiceTests.java | 12 ++-- .../RestDeprecationInfoAction.java | 2 +- .../TransformDeprecationChecker.java | 2 +- .../test/enrich/CommonEnrichRestTestCase.java | 2 +- .../xpack/enrich/EnrichCache.java | 2 +- .../EnrichPolicyMaintenanceService.java | 6 +- .../xpack/enrich/EnrichPolicyRunner.java | 2 +- .../xpack/enrich/EnrichProcessorFactory.java | 2 +- .../action/EnrichShardMultiSearchAction.java | 2 +- .../EnterpriseSearchUsageTransportAction.java | 7 ++- .../AnalyticsCollectionResolver.java | 6 +- .../action/RestPostAnalyticsEventAction.java | 4 +- .../rules/QueryRulesIndexService.java | 2 +- .../search/SearchApplicationIndexService.java | 7 +-- .../test/eql/BaseEqlSpecTestCase.java | 6 +- .../test/eql/EqlRestTestCase.java | 2 +- .../test/eql/stats/EqlUsageRestTestCase.java | 8 +-- .../xpack/eql/analysis/Verifier.java | 2 +- .../execution/assembler/ExecutionManager.java | 6 +- .../assembler/SampleQueryRequest.java | 2 +- .../execution/sequence/TumblingWindow.java | 2 +- .../xpack/eql/optimizer/Optimizer.java | 4 +- .../xpack/eql/plan/physical/EsQueryExec.java | 2 +- .../container/FieldExtractorRegistry.java | 6 +- 
.../xpack/eql/session/EqlSession.java | 2 +- .../org/elasticsearch/xpack/fleet/Fleet.java | 20 +++--- .../GetGlobalCheckpointsShardAction.java | 2 +- .../action/TransportGraphExploreAction.java | 6 +- .../graph/rest/action/RestGraphAction.java | 6 +- ...TransportPutSamlServiceProviderAction.java | 2 +- .../idp/privileges/UserPrivilegeResolver.java | 2 +- .../saml/authn/SamlAuthnRequestValidator.java | 20 +++--- ...lAuthenticationResponseMessageBuilder.java | 4 +- .../idp/saml/idp/SamlIdPMetadataBuilder.java | 2 +- .../saml/sp/SamlServiceProviderDocument.java | 8 +-- .../saml/sp/SamlServiceProviderFactory.java | 2 +- .../idp/saml/sp/SamlServiceProviderIndex.java | 2 +- .../idp/saml/sp/WildcardServiceProvider.java | 4 +- .../xpack/idp/saml/support/SamlFactory.java | 28 +++------ .../idp/saml/support/SamlObjectSigner.java | 2 +- .../saml/idp/SamlMetadataGeneratorTests.java | 2 +- .../xpack/idp/saml/test/IdpSamlTestCase.java | 2 +- .../IndexLifecycleUsageTransportAction.java | 2 +- .../xpack/logstash/Logstash.java | 4 +- .../action/TransportDeletePipelineAction.java | 2 +- .../action/TransportGetPipelineAction.java | 8 +-- .../xpack/ml/MachineLearning.java | 2 +- .../MachineLearningUsageTransportAction.java | 25 ++++---- .../xpack/ml/MlAssignmentNotifier.java | 2 +- .../ml/action/TransportCloseJobAction.java | 2 +- .../action/TransportDeleteCalendarAction.java | 2 +- ...ansportDeleteDataFrameAnalyticsAction.java | 2 +- .../action/TransportDeleteForecastAction.java | 2 +- .../ml/action/TransportDeleteJobAction.java | 4 +- .../TransportInternalInferModelAction.java | 2 +- .../ml/action/TransportMlInfoAction.java | 2 +- .../TransportRevertModelSnapshotAction.java | 2 +- ...ransportStartDataFrameAnalyticsAction.java | 4 +- .../action/TransportStartDatafeedAction.java | 2 +- ...TransportStopDataFrameAnalyticsAction.java | 2 +- .../action/TransportStopDatafeedAction.java | 2 +- .../frequentitemsets/EclatMapReducer.java | 2 +- .../xpack/ml/aggs/heuristic/PValueScore.java | 2 +- .../MlProcessorAutoscalingDecider.java | 9 ++- .../xpack/ml/datafeed/DatafeedJob.java | 2 +- .../xpack/ml/datafeed/DatafeedManager.java | 2 +- .../xpack/ml/datafeed/DatafeedRunner.java | 2 +- .../AbstractAggregationDataExtractor.java | 2 +- .../AggregationToJsonProcessor.java | 2 +- .../persistence/DatafeedConfigProvider.java | 4 +- .../extractor/ExtractedFieldsDetector.java | 4 +- .../ExtractedFieldsDetectorFactory.java | 2 +- .../process/AnalyticsProcessManager.java | 9 ++- .../process/DataFrameRowsJoiner.java | 4 +- .../xpack/ml/extractor/GeoPointField.java | 2 +- .../xpack/ml/extractor/GeoShapeField.java | 4 +- .../TrainedModelAssignmentNodeService.java | 2 +- .../TrainedModelAssignmentRebalancer.java | 4 +- .../planning/AllocationReducer.java | 2 +- .../planning/AssignmentPlanner.java | 2 +- .../planning/LinearProgrammingPlanSolver.java | 4 +- .../RandomizedAssignmentRounding.java | 8 +-- .../xpack/ml/inference/nlp/NerProcessor.java | 2 +- .../PrecompiledCharMapNormalizer.java | 8 +-- .../persistence/TrainedModelProvider.java | 8 +-- .../process/PyTorchResultProcessor.java | 2 +- .../pytorch/process/PyTorchStateStreamer.java | 2 +- .../xpack/ml/job/JobManager.java | 4 +- .../xpack/ml/job/NodeLoadDetector.java | 5 +- .../ml/job/persistence/JobConfigProvider.java | 4 +- .../ml/job/persistence/JobDataDeleter.java | 2 +- .../JobRenormalizedResultsPersister.java | 2 +- .../autodetect/AutodetectProcessManager.java | 2 +- .../autodetect/params/FlushJobParams.java | 8 +-- .../process/autodetect/params/TimeRange.java | 4 +- 
.../writer/AbstractDataToProcessWriter.java | 2 +- .../writer/JsonDataToProcessWriter.java | 2 +- .../retention/ExpiredForecastsRemover.java | 2 +- .../job/retention/ExpiredResultsRemover.java | 2 +- .../xpack/ml/utils/NamedPipeHelper.java | 6 +- .../WrappedBatchedJobsIterator.java | 2 +- ...ransportMonitoringMigrateAlertsAction.java | 6 +- .../monitoring/cleaner/CleanerService.java | 2 +- .../collector/shards/ShardsCollector.java | 2 +- .../xpack/monitoring/exporter/Exporters.java | 2 +- .../http/PublishableHttpResource.java | 4 +- .../http/WatcherExistsHttpResource.java | 2 +- .../exporter/local/LocalExporter.java | 4 +- .../rest/action/RestMonitoringBulkAction.java | 2 +- .../profiling/GetStackTracesResponse.java | 2 +- .../xpack/ql/analyzer/AnalyzerRules.java | 2 +- .../xpack/ql/analyzer/PreAnalyzer.java | 2 +- .../extractor/AbstractFieldHitExtractor.java | 2 +- .../search/extractor/TotalHitsExtractor.java | 2 +- .../xpack/ql/index/IndexResolver.java | 4 +- .../xpack/ql/optimizer/OptimizerRules.java | 16 ++--- .../xpack/ql/plan/QueryPlan.java | 4 +- .../org/elasticsearch/xpack/ql/tree/Node.java | 4 +- .../xpack/rollup/job/RollupIndexer.java | 2 +- .../SearchableSnapshotAllocator.java | 2 +- .../xpack/security/cli/AutoConfigureNode.java | 27 ++++---- .../xpack/security/cli/CertificateTool.java | 2 +- .../security/cli/HttpCertificateCommand.java | 48 +++++++------- .../cli/HttpCertificateCommandTests.java | 23 ++++--- .../xpack/security/Security.java | 1 - .../TransportBaseUpdateApiKeyAction.java | 5 +- .../action/role/TransportGetRolesAction.java | 4 +- .../ReservedRoleMappingAction.java | 2 +- .../TransportSamlCompleteLogoutAction.java | 2 +- .../TransportSamlInvalidateSessionAction.java | 4 +- .../saml/TransportSamlLogoutAction.java | 2 +- ...nsportSamlPrepareAuthenticationAction.java | 6 +- .../audit/logfile/LoggingAuditTrail.java | 15 +++-- .../xpack/security/authc/ApiKeyService.java | 8 +-- ...ossClusterAccessAuthenticationService.java | 2 +- .../xpack/security/authc/Realms.java | 4 +- .../esnative/tool/ResetPasswordTool.java | 4 +- .../esnative/tool/SetupPasswordTool.java | 14 ++--- .../authc/kerberos/KerberosRealm.java | 2 +- .../kerberos/KerberosTicketValidator.java | 2 +- .../authc/ldap/LdapSessionFactory.java | 2 +- .../ldap/support/LdapMetadataResolver.java | 2 +- .../authc/ldap/support/SessionFactory.java | 2 +- .../oidc/OpenIdConnectAuthenticator.java | 12 ++-- .../authc/oidc/OpenIdConnectRealm.java | 2 +- .../authc/saml/SamlAuthenticator.java | 9 ++- .../authc/saml/SamlLogoutRequestHandler.java | 2 +- .../authc/saml/SamlMessageBuilder.java | 2 +- .../authc/saml/SamlMetadataCommand.java | 18 +++--- .../authc/saml/SamlObjectHandler.java | 12 ++-- .../xpack/security/authc/saml/SamlRealm.java | 2 +- .../security/authc/saml/SamlRedirect.java | 4 +- .../authc/saml/SamlResponseHandler.java | 12 ++-- .../authc/saml/SamlSpMetadataBuilder.java | 6 +- .../IndexServiceAccountTokenStore.java | 4 +- .../authc/service/ServiceAccountService.java | 8 +-- .../DelegatedAuthorizationSupport.java | 6 +- .../authz/restriction/WorkflowService.java | 6 +- .../authz/store/CompositeRolesStore.java | 2 +- .../authz/store/RoleDescriptorStore.java | 2 +- .../ExternalEnrollmentTokenGenerator.java | 8 +-- .../security/rest/SecurityRestFilter.java | 6 +- .../action/oauth2/RestGetTokenAction.java | 2 +- .../saml/RestSamlAuthenticateAction.java | 2 +- ...ossClusterAccessServerTransportFilter.java | 2 +- .../transport/ServerTransportFilter.java | 1 - .../security/authc/ApiKeyServiceTests.java | 2 +- 
.../oidc/OpenIdConnectAuthenticatorTests.java | 4 +- .../service/ServiceAccountServiceTests.java | 4 +- .../xpack/security/authz/RBACEngineTests.java | 6 +- .../restriction/WorkflowServiceTests.java | 10 ++- .../authz/store/CompositeRolesStoreTests.java | 6 +- ...ExternalEnrollmentTokenGeneratorTests.java | 18 +++--- .../rest/SecurityRestFilterTests.java | 36 ++--------- .../plan/SnapshotsRecoveryPlannerService.java | 6 +- .../sql/qa/CustomDateFormatTestCase.java | 2 +- .../xpack/sql/qa/FieldExtractorTestCase.java | 23 ++++--- .../xpack/sql/qa/SqlProtocolTestCase.java | 19 ++++-- .../xpack/sql/qa/cli/EmbeddedCli.java | 2 +- .../sql/qa/cli/PartialResultsTestCase.java | 2 +- .../qa/rest/RestSqlPaginationTestCase.java | 2 +- .../xpack/sql/qa/rest/RestSqlTestCase.java | 4 +- .../sql/qa/rest/RestSqlUsageTestCase.java | 13 ++-- .../org/elasticsearch/xpack/sql/cli/Cli.java | 2 +- .../xpack/sql/cli/ConnectionBuilder.java | 2 +- .../cli/command/ServerQueryCliCommand.java | 2 +- .../xpack/sql/client/HttpClient.java | 10 +-- .../sql/client/JreHttpUrlConnection.java | 4 +- .../xpack/sql/client/ProxyConfig.java | 2 +- .../xpack/sql/client/SslConfig.java | 2 +- .../content/ConstructingObjectParser.java | 2 +- .../xpack/sql/proto/content/ObjectParser.java | 6 +- .../sql/proto/content/ParsedMediaType.java | 2 +- .../xpack/sql/analysis/analyzer/Analyzer.java | 24 +++---- .../xpack/sql/analysis/analyzer/Verifier.java | 4 +- .../sql/execution/search/PivotRowSet.java | 2 +- .../xpack/sql/optimizer/Optimizer.java | 19 +++--- .../xpack/sql/parser/LogicalPlanBuilder.java | 4 +- .../xpack/sql/planner/Mapper.java | 2 +- .../action/TransportGetCheckpointAction.java | 2 +- .../action/TransportStartTransformAction.java | 2 +- .../IndexBasedTransformConfigManager.java | 2 +- .../rest/action/RestCatTransformAction.java | 2 +- .../TransformPersistentTasksExecutor.java | 2 +- .../vectortile/feature/FeatureFactory.java | 4 +- .../elasticsearch/xpack/watcher/Watcher.java | 8 +-- .../watcher/WatcherIndexingListener.java | 5 +- .../watcher/WatcherLifeCycleService.java | 2 +- .../xpack/watcher/WatcherService.java | 2 +- .../actions/index/ExecutableIndexAction.java | 11 +++- .../xpack/watcher/common/http/HttpClient.java | 2 +- .../watcher/common/http/HttpRequest.java | 4 +- .../common/text/TextTemplateEngine.java | 6 +- .../watcher/execution/ExecutionService.java | 4 +- .../execution/TriggeredWatchStore.java | 2 +- .../watcher/notification/email/Account.java | 2 +- .../notification/email/EmailService.java | 2 +- .../attachment/ReportingAttachmentParser.java | 53 +++++++--------- .../notification/pagerduty/IncidentEvent.java | 2 +- .../actions/TransportActivateWatchAction.java | 2 +- .../engine/TickerScheduleTriggerEngine.java | 2 +- .../trigger/schedule/tool/CronEvalTool.java | 2 +- .../watcher/WatcherIndexingListenerTests.java | 9 ++- .../ReportingAttachmentParserTests.java | 2 +- .../wildcard/mapper/WildcardFieldMapper.java | 2 +- .../LicensedWriteLoadForecaster.java | 2 +- .../test/CoreTestTranslater.java | 2 +- 418 files changed, 1031 insertions(+), 1015 deletions(-) diff --git a/distribution/tools/geoip-cli/src/main/java/org/elasticsearch/geoip/GeoIpCli.java b/distribution/tools/geoip-cli/src/main/java/org/elasticsearch/geoip/GeoIpCli.java index 8aea17585a5af..5edfe0a326fec 100644 --- a/distribution/tools/geoip-cli/src/main/java/org/elasticsearch/geoip/GeoIpCli.java +++ b/distribution/tools/geoip-cli/src/main/java/org/elasticsearch/geoip/GeoIpCli.java @@ -64,11 +64,11 @@ protected void execute(Terminal terminal, OptionSet 
options, ProcessInfo process } @SuppressForbidden(reason = "file arg for cli") - private Path getPath(String file) { + private static Path getPath(String file) { return PathUtils.get(file); } - private void copyTgzToTarget(Path source, Path target) throws IOException { + private static void copyTgzToTarget(Path source, Path target) throws IOException { if (source.equals(target)) { return; } @@ -79,7 +79,7 @@ private void copyTgzToTarget(Path source, Path target) throws IOException { } } - private void packDatabasesToTgz(Terminal terminal, Path source, Path target) throws IOException { + private static void packDatabasesToTgz(Terminal terminal, Path source, Path target) throws IOException { try (Stream files = Files.list(source)) { for (Path path : files.filter(p -> p.getFileName().toString().endsWith(".mmdb")).toList()) { String fileName = path.getFileName().toString(); @@ -102,7 +102,7 @@ private void packDatabasesToTgz(Terminal terminal, Path source, Path target) thr } } - private void createOverviewJson(Terminal terminal, Path directory) throws IOException { + private static void createOverviewJson(Terminal terminal, Path directory) throws IOException { Path overview = directory.resolve("overview.json"); try ( Stream files = Files.list(directory); @@ -130,7 +130,7 @@ private void createOverviewJson(Terminal terminal, Path directory) throws IOExce terminal.println("overview.json created"); } - private byte[] createTarHeader(String name, long size) { + private static byte[] createTarHeader(String name, long size) { byte[] buf = new byte[512]; byte[] sizeBytes = String.format(Locale.ROOT, "%1$012o", size).getBytes(StandardCharsets.UTF_8); byte[] nameBytes = name.substring(Math.max(0, name.length() - 100)).getBytes(StandardCharsets.US_ASCII); diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/AddFileKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/AddFileKeyStoreCommand.java index 8a6b1e462eb83..e378747e583db 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/AddFileKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/AddFileKeyStoreCommand.java @@ -77,7 +77,7 @@ protected void executeCommand(Terminal terminal, OptionSet options, Environment } @SuppressForbidden(reason = "file arg for cli") - private Path getPath(String file) { + private static Path getPath(String file) { return PathUtils.get(file); } diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java index 349e655f95afe..d32cbd8dd1736 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java @@ -201,7 +201,6 @@ public class InstallPluginAction implements Closeable { private Environment env; private boolean batch; private Proxy proxy = null; - private NamedComponentScanner scanner = new NamedComponentScanner(); public InstallPluginAction(Terminal terminal, Environment env, boolean batch) { this.terminal = terminal; @@ -394,7 +393,7 @@ private String getElasticUrl( return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Build.current().qualifiedVersion()); } - private String nonReleaseUrl(final String hostname, final Version version, 
final String stagingHash, final String pluginId) { + private static String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) { return String.format( Locale.ROOT, "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", @@ -441,7 +440,7 @@ boolean urlExists(String urlString) throws IOException { /** * Returns all the official plugin names that look similar to pluginId. **/ - private List checkMisspelledPlugin(String pluginId) { + private static List checkMisspelledPlugin(String pluginId) { LevenshteinDistance ld = new LevenshteinDistance(); List> scoredKeys = new ArrayList<>(); for (String officialPlugin : OFFICIAL_PLUGINS) { @@ -819,7 +818,7 @@ private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException return target; } - private Path stagingDirectory(Path pluginsDir) throws IOException { + private static Path stagingDirectory(Path pluginsDir) throws IOException { try { return Files.createTempDirectory(pluginsDir, ".installing-", PosixFilePermissions.asFileAttribute(PLUGIN_DIR_PERMS)); } catch (UnsupportedOperationException e) { @@ -827,12 +826,12 @@ private Path stagingDirectory(Path pluginsDir) throws IOException { } } - private Path stagingDirectoryWithoutPosixPermissions(Path pluginsDir) throws IOException { + private static Path stagingDirectoryWithoutPosixPermissions(Path pluginsDir) throws IOException { return Files.createTempDirectory(pluginsDir, ".installing-"); } // checking for existing version of the plugin - private void verifyPluginName(Path pluginPath, String pluginName) throws UserException, IOException { + private static void verifyPluginName(Path pluginPath, String pluginName) throws UserException, IOException { // don't let user install plugin conflicting with module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(pluginName)) { @@ -877,15 +876,15 @@ private PluginDescriptor loadPluginInfo(Path pluginRoot) throws Exception { return info; } - private void generateNameComponentFile(Path pluginRoot) throws IOException { + private static void generateNameComponentFile(Path pluginRoot) throws IOException { Stream classPath = ClassReaders.ofClassPath().stream(); // contains plugin-api List classReaders = Stream.concat(ClassReaders.ofDirWithJars(pluginRoot).stream(), classPath).toList(); - Map> namedComponentsMap = scanner.scanForNamedClasses(classReaders); + Map> namedComponentsMap = NamedComponentScanner.scanForNamedClasses(classReaders); Path outputFile = pluginRoot.resolve(PluginDescriptor.NAMED_COMPONENTS_FILENAME); - scanner.writeToFile(namedComponentsMap, outputFile); + NamedComponentScanner.writeToFile(namedComponentsMap, outputFile); } - private boolean hasNamedComponentFile(Path pluginRoot) { + private static boolean hasNamedComponentFile(Path pluginRoot) { return Files.exists(pluginRoot.resolve(PluginDescriptor.NAMED_COMPONENTS_FILENAME)); } @@ -949,7 +948,7 @@ private PluginDescriptor installPlugin(InstallablePlugin descriptor, Path tmpRoo /** * Moves bin and config directories from the plugin if they exist */ - private void installPluginSupportFiles( + private static void installPluginSupportFiles( PluginDescriptor info, Path tmpRoot, Path destBinDir, @@ -973,7 +972,7 @@ private void installPluginSupportFiles( /** * Moves the plugin directory into its final destination. 
*/ - private void movePlugin(Path tmpRoot, Path destination) throws IOException { + private static void movePlugin(Path tmpRoot, Path destination) throws IOException { Files.move(tmpRoot, destination, StandardCopyOption.ATOMIC_MOVE); Files.walkFileTree(destination, new SimpleFileVisitor<>() { @Override @@ -1000,7 +999,7 @@ public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) /** * Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. */ - private void installBin(PluginDescriptor info, Path tmpBinDir, Path destBinDir) throws Exception { + private static void installBin(PluginDescriptor info, Path tmpBinDir, Path destBinDir) throws Exception { if (Files.isDirectory(tmpBinDir) == false) { throw new UserException(PLUGIN_MALFORMED, "bin in plugin " + info.getName() + " is not a directory"); } @@ -1028,7 +1027,7 @@ private void installBin(PluginDescriptor info, Path tmpBinDir, Path destBinDir) * Copies the files from {@code tmpConfigDir} into {@code destConfigDir}. * Any files existing in both the source and destination will be skipped. */ - private void installConfig(PluginDescriptor info, Path tmpConfigDir, Path destConfigDir) throws Exception { + private static void installConfig(PluginDescriptor info, Path tmpConfigDir, Path destConfigDir) throws Exception { if (Files.isDirectory(tmpConfigDir) == false) { throw new UserException(PLUGIN_MALFORMED, "config in plugin " + info.getName() + " is not a directory"); } diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java index afb07324f8952..178c0bfabfc7a 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java @@ -57,7 +57,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce } } - private void printPlugin(Environment env, Terminal terminal, Path plugin, String prefix) throws IOException { + private static void printPlugin(Environment env, Terminal terminal, Path plugin, String prefix) throws IOException { terminal.println(Terminal.Verbosity.SILENT, prefix + plugin.getFileName().toString()); PluginDescriptor info = PluginDescriptor.readFromProperties(env.pluginsFile().resolve(plugin)); terminal.println(Terminal.Verbosity.VERBOSE, info.toString(prefix)); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java index 869ff520a0534..7b3adadb29b4c 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java @@ -112,7 +112,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce } } - private void printVersion(Terminal terminal) { + private static void printVersion(Terminal terminal) { final String versionOutput = String.format( Locale.ROOT, "Version: %s, Build: %s/%s/%s, JVM: %s", @@ -197,7 +197,7 @@ void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) th syncPlugins.execute(terminal, syncPlugins.parseOptions(new String[0]), env, processInfo); } - private void validatePidFile(Path pidFile) throws UserException { 
+ private static void validatePidFile(Path pidFile) throws UserException { Path parent = pidFile.getParent(); if (parent != null && Files.exists(parent) && Files.isDirectory(parent) == false) { throw new UserException(ExitCodes.USAGE, "pid file parent [" + parent + "] exists but is not a directory"); } diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java index b507e5e43a456..e32e34fb24400 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/ProcrunCommand.java @@ -92,7 +92,7 @@ static String quote(String s) { } /** Determines the service id for the Elasticsearch service that should be used */ - private String getServiceId(OptionSet options, Map<String, String> env) throws UserException { + private static String getServiceId(OptionSet options, Map<String, String> env) throws UserException { List<?> args = options.nonOptionArguments(); if (args.size() > 1) { throw new UserException(ExitCodes.USAGE, "too many arguments, expected one service id"); @@ -107,7 +107,7 @@ private String getServiceId(OptionSet options, Map<String, String> env) throws U } /** Determines the logging arguments that should be passed to the procrun command */ - private String getLogArgs(String serviceId, Path esHome, Map<String, String> env) { + private static String getLogArgs(String serviceId, Path esHome, Map<String, String> env) { String logArgs = env.get("LOG_OPTS"); if (logArgs != null && logArgs.isBlank() == false) { return logArgs; diff --git a/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoader.java b/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoader.java index 134170f52ee14..049b619fb57ac 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoader.java +++ b/libs/core/src/main/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoader.java @@ -185,7 +185,7 @@ protected URL findResource(String name) { * url or null if not found. Iterates over all known package specific multi-release versions, * then the root, for the given jar prefix.
*/ - <T> T findResourceInLoaderPkgOrNull(JarMeta jarMeta, String pkg, String name, Function<String, T> finder) { + static <T> T findResourceInLoaderPkgOrNull(JarMeta jarMeta, String pkg, String name, Function<String, T> finder) { List<Integer> releaseVersions = jarMeta.pkgToVersions().getOrDefault(pkg, List.of()); for (int releaseVersion : releaseVersions) { String fullName = jarMeta.prefix() + "/" + MRJAR_VERSION_PREFIX + releaseVersion + "/" + name; diff --git a/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java b/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java index ac93c56bafbf1..0858493ea003d 100644 --- a/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java +++ b/libs/core/src/main/java/org/elasticsearch/jdk/ModuleQualifiedExportsService.java @@ -49,7 +49,7 @@ protected ModuleQualifiedExportsService(Module module) { .collect(Collectors.toUnmodifiableSet()); } - private <T> Map<String, List<String>> invert( + private static <T> Map<String, List<String>> invert( Collection<T> sourcesToTargets, Predicate<T> qualifiedPredicate, Function<T, String> sourceGetter, diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/Polygon.java b/libs/geo/src/main/java/org/elasticsearch/geometry/Polygon.java index d6a08a8bec20f..3360ac4da2b2c 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/Polygon.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/Polygon.java @@ -58,7 +58,7 @@ public ShapeType type() { return ShapeType.POLYGON; } - private void checkRing(LinearRing ring) { + private static void checkRing(LinearRing ring) { if (ring.length() < 4) { throw new IllegalArgumentException("at least 4 polygon points required"); } diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/simplify/SimplificationErrorCalculator.java b/libs/geo/src/main/java/org/elasticsearch/geometry/simplify/SimplificationErrorCalculator.java index b442cf35bcdcd..d90dabcc8cc35 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/simplify/SimplificationErrorCalculator.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/simplify/SimplificationErrorCalculator.java @@ -83,7 +83,7 @@ public double calculateError(PointLike left, PointLike middle, PointLike right) } } - private double distance(PointLike a, PointLike b) { + private static double distance(PointLike a, PointLike b) { return SloppyMath.haversinMeters(a.y(), a.x(), b.y(), b.x()); } } @@ -115,7 +115,7 @@ public double calculateError(PointLike left, PointLike middle, PointLike right) } } - private double distance(PointLike a, PointLike b) { + private static double distance(PointLike a, PointLike b) { return SloppyMath.haversinMeters(a.y(), a.x(), b.y(), b.x()); } } diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index 67e4a018f29ea..5b9a6f4698967 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -96,7 +96,7 @@ private Grok( this.captureConfig = List.copyOf(grokCaptureConfigs); } - private String groupMatch(String name, Region region, String pattern) { + private static String groupMatch(String name, Region region, String pattern) { int number = GROK_PATTERN_REGEX.nameToBackrefNumber( name.getBytes(StandardCharsets.UTF_8), 0, diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java b/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java index 0da0cde4908d1..9df8c74e31f51 100644 ---
a/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java @@ -68,7 +68,7 @@ static GrokCaptureType fromString(String str) { }; } - protected final GrokCaptureExtracter rawExtracter(int[] backRefs, Consumer emit) { + protected static GrokCaptureExtracter rawExtracter(int[] backRefs, Consumer emit) { return (utf8Bytes, offset, region) -> { for (int number : backRefs) { if (region.beg[number] >= 0) { diff --git a/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java index e306cf93bba48..7a55a6749846a 100644 --- a/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java +++ b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java @@ -31,14 +31,13 @@ public class NamedComponentScanner { public static void main(String[] args) throws IOException { List<ClassReader> classReaders = ClassReaders.ofClassPath(); - NamedComponentScanner scanner = new NamedComponentScanner(); - Map<String, Map<String, String>> namedComponentsMap = scanner.scanForNamedClasses(classReaders); + Map<String, Map<String, String>> namedComponentsMap = scanForNamedClasses(classReaders); Path outputFile = Path.of(args[0]); - scanner.writeToFile(namedComponentsMap, outputFile); + NamedComponentScanner.writeToFile(namedComponentsMap, outputFile); } // scope for testing - public void writeToFile(Map<String, Map<String, String>> namedComponentsMap, Path outputFile) throws IOException { + public static void writeToFile(Map<String, Map<String, String>> namedComponentsMap, Path outputFile) throws IOException { Files.createDirectories(outputFile.getParent()); try (OutputStream outputStream = Files.newOutputStream(outputFile)) { @@ -58,7 +57,7 @@ public void writeToFile(Map<String, Map<String, String>> namedComponentsMap, Pat } // returns a Map - extensible interface -> map{ namedName -> className } - public Map<String, Map<String, String>> scanForNamedClasses(List<ClassReader> classReaders) { + public static Map<String, Map<String, String>> scanForNamedClasses(List<ClassReader> classReaders) { ClassScanner extensibleClassScanner = new ClassScanner(Type.getDescriptor(Extensible.class), (classname, map) -> { map.put(classname, classname); return null; @@ -95,7 +94,7 @@ public void visit(String key, Object value) { return componentInfo; } - private String pathToClassName(String classWithSlashes) { + private static String pathToClassName(String classWithSlashes) { return classWithSlashes.replace('/', '.'); } diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java index cf81b81963c7b..1b3781c61cf8f 100644 --- a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java @@ -39,10 +39,8 @@ private Path tmpDir() throws IOException { return createTempDir(); } - NamedComponentScanner namedComponentScanner = new NamedComponentScanner(); - public void testFindNamedComponentInSingleClass() throws URISyntaxException { - Map<String, Map<String, String>> namedComponents = namedComponentScanner.scanForNamedClasses( + Map<String, Map<String, String>> namedComponents = NamedComponentScanner.scanForNamedClasses( classReaderStream(TestNamedComponent.class, ExtensibleInterface.class) ); @@ -82,7 +80,7 @@ public class B implements ExtensibleInterface{} )// contains plugin-api .toList(); - Map<String, Map<String, String>> namedComponents = namedComponentScanner.scanForNamedClasses(classReaderStream); + Map<String, Map<String, String>> namedComponents = NamedComponentScanner.scanForNamedClasses(classReaderStream); org.hamcrest.MatcherAssert.assertThat( namedComponents, @@ -158,7 +156,7 @@ public class B implements CustomExtensibleInterface{} List<ClassReader> classReaders = Stream.concat(ClassReaders.ofDirWithJars(dirWithJar).stream(), classPath)// contains plugin-api .toList(); - Map<String, Map<String, String>> namedComponents = namedComponentScanner.scanForNamedClasses(classReaders); + Map<String, Map<String, String>> namedComponents = NamedComponentScanner.scanForNamedClasses(classReaders); org.hamcrest.MatcherAssert.assertThat( namedComponents, @@ -189,7 +187,7 @@ public void testWriteToFile() throws IOException { mapToWrite.put(ExtensibleInterface.class.getCanonicalName(), extensibleInterfaceComponents); Path path = tmpDir().resolve("file.json"); - namedComponentScanner.writeToFile(mapToWrite, path); + NamedComponentScanner.writeToFile(mapToWrite, path); String jsonMap = Files.readString(path); assertThat(jsonMap, equalTo(""" diff --git a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java index 2eaf3192eefef..296ed57a4d960 100644 --- a/libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java +++ b/libs/tdigest/src/main/java/org/elasticsearch/tdigest/TDigest.java @@ -105,7 +105,7 @@ public final void add(double x) { add(x, 1); } - final void checkValue(double x) { + static void checkValue(double x) { if (Double.isNaN(x) || Double.isInfinite(x)) { throw new IllegalArgumentException("Invalid value: " + x); } diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/filtering/FilterPath.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/filtering/FilterPath.java index d42ef96d8ff3a..733e5b6b77684 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/filtering/FilterPath.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/support/filtering/FilterPath.java @@ -151,7 +151,7 @@ FilterPath build() { return buildPath("", root); } - void insertNode(String filter, BuildNode node) { + static void insertNode(String filter, BuildNode node) { int end = filter.length(); int splitPosition = -1; boolean findEscapes = false; @@ -184,7 +184,7 @@ void insertNode(String filter, BuildNode node) { } } - FilterPath buildPath(String segment, BuildNode node) { + static FilterPath buildPath(String segment, BuildNode node) { Map<String, FilterPath> termsChildren = new HashMap<>(); List<FilterPath> wildcardChildren = new ArrayList<>(); for (Map.Entry<String, BuildNode> entry : node.children.entrySet()) { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java index eab1c24820285..34a8a44326f46 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java @@ -169,7 +169,7 @@ public final ArrayValuesSourceAggregationBuilder parse(String aggregationName return factory; } - private void parseMissingAndAdd( + private static void parseMissingAndAdd( final String aggregationName, final String currentFieldName, XContentParser parser, diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsResults.java
b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsResults.java index b5b3fad8253eb..9aa7358563d79 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsResults.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsResults.java @@ -184,7 +184,7 @@ static > double getValFromUpperTriangularMatrix(Ma throw new IllegalArgumentException("Coefficient not computed between fields: " + fieldX + " and " + fieldY); } - private void checkField(String field, Map map) { + private static void checkField(String field, Map map) { if (field == null) { throw new IllegalArgumentException("field name cannot be null"); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 14de9193e01fe..5d85a199c4e3d 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -1096,7 +1096,7 @@ private void setForceMergeCompletedTimestamp(String targetIndex, ActionListener< * Returns true if a value has been set for the custom index metadata field "force_merge_completed_timestamp" within the field * "data_stream_lifecycle". */ - private boolean isForceMergeComplete(IndexMetadata backingIndex) { + private static boolean isForceMergeComplete(IndexMetadata backingIndex) { Map customMetadata = backingIndex.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY); return customMetadata != null && customMetadata.containsKey(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java index 40c0d9194f1b2..c8774b18c7e9e 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java @@ -56,7 +56,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); } - private XContentBuilder timestampMapping(boolean enabled, CheckedConsumer propertiesBuilder) + private static XContentBuilder timestampMapping(boolean enabled, CheckedConsumer propertiesBuilder) throws IOException { return topMapping(b -> { b.startObject(DataStreamTimestampFieldMapper.NAME).field("enabled", enabled).endObject(); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/NetworkDirectionProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/NetworkDirectionProcessor.java index 57b58483d227b..6d1809d871680 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/NetworkDirectionProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/NetworkDirectionProcessor.java @@ -153,7 +153,7 @@ private String getDirection(IngestDocument d) throws Exception { return DIRECTION_EXTERNAL; } - private boolean isInternal(List networks, String ip) { + private static boolean isInternal(List networks, String ip) { for (String network : networks) { if (inNetwork(ip, network)) { return 
true; @@ -162,7 +162,7 @@ private boolean isInternal(List networks, String ip) { return false; } - private boolean inNetwork(String ip, String network) { + private static boolean inNetwork(String ip, String network) { InetAddress address = InetAddresses.forString(ip); return switch (network) { case LOOPBACK_NAMED_NETWORK -> isLoopback(address); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RerouteProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RerouteProcessor.java index fd456f7f132e9..e44bb57f0b8e8 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RerouteProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RerouteProcessor.java @@ -124,7 +124,7 @@ private static String parseDataStreamNamespace(String dataStreamName, int indexO return dataStreamName.substring(indexOfSecondDash + 1); } - private String determineDataStreamField( + private static String determineDataStreamField( IngestDocument ingestDocument, List valueSources, String fallbackFromCurrentTarget diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index d60a740930858..76c0e6e494a74 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -248,7 +248,7 @@ int indexChunks(String name, InputStream is, int chunk, String expectedMd5, long } // visible for testing - byte[] getChunk(InputStream is) throws IOException { + static byte[] getChunk(InputStream is) throws IOException { byte[] buf = new byte[MAX_CHUNK_SIZE]; int chunkSize = 0; while (chunkSize < MAX_CHUNK_SIZE) { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java index 7481f144b6376..188d826b05ff5 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/HttpClient.java @@ -69,11 +69,11 @@ InputStream get(String urlToGet) throws IOException { } @SuppressForbidden(reason = "we need socket connection to download data from internet") - private InputStream getInputStream(HttpURLConnection conn) throws IOException { + private static InputStream getInputStream(HttpURLConnection conn) throws IOException { return conn.getInputStream(); } - private HttpURLConnection createConnection(String url) throws IOException { + private static HttpURLConnection createConnection(String url) throws IOException { HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection(); conn.setConnectTimeout(10000); conn.setReadTimeout(10000); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index f5a57e68581f2..5fedb357fff8e 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -119,22 +119,22 @@ public void tearDown() throws Exception { } public void testGetChunkEndOfStream() throws IOException { - byte[] chunk = geoIpDownloader.getChunk(new InputStream() { + 
byte[] chunk = GeoIpDownloader.getChunk(new InputStream() { @Override public int read() { return -1; } }); assertArrayEquals(new byte[0], chunk); - chunk = geoIpDownloader.getChunk(new ByteArrayInputStream(new byte[0])); + chunk = GeoIpDownloader.getChunk(new ByteArrayInputStream(new byte[0])); assertArrayEquals(new byte[0], chunk); } public void testGetChunkLessThanChunkSize() throws IOException { ByteArrayInputStream is = new ByteArrayInputStream(new byte[] { 1, 2, 3, 4 }); - byte[] chunk = geoIpDownloader.getChunk(is); + byte[] chunk = GeoIpDownloader.getChunk(is); assertArrayEquals(new byte[] { 1, 2, 3, 4 }, chunk); - chunk = geoIpDownloader.getChunk(is); + chunk = GeoIpDownloader.getChunk(is); assertArrayEquals(new byte[0], chunk); } @@ -145,9 +145,9 @@ public void testGetChunkExactlyChunkSize() throws IOException { bigArray[i] = (byte) i; } ByteArrayInputStream is = new ByteArrayInputStream(bigArray); - byte[] chunk = geoIpDownloader.getChunk(is); + byte[] chunk = GeoIpDownloader.getChunk(is); assertArrayEquals(bigArray, chunk); - chunk = geoIpDownloader.getChunk(is); + chunk = GeoIpDownloader.getChunk(is); assertArrayEquals(new byte[0], chunk); } @@ -159,17 +159,17 @@ public void testGetChunkMoreThanChunkSize() throws IOException { byte[] smallArray = new byte[MAX_CHUNK_SIZE]; System.arraycopy(bigArray, 0, smallArray, 0, MAX_CHUNK_SIZE); ByteArrayInputStream is = new ByteArrayInputStream(bigArray); - byte[] chunk = geoIpDownloader.getChunk(is); + byte[] chunk = GeoIpDownloader.getChunk(is); assertArrayEquals(smallArray, chunk); System.arraycopy(bigArray, MAX_CHUNK_SIZE, smallArray, 0, MAX_CHUNK_SIZE); - chunk = geoIpDownloader.getChunk(is); + chunk = GeoIpDownloader.getChunk(is); assertArrayEquals(smallArray, chunk); - chunk = geoIpDownloader.getChunk(is); + chunk = GeoIpDownloader.getChunk(is); assertArrayEquals(new byte[0], chunk); } public void testGetChunkRethrowsIOException() { - expectThrows(IOException.class, () -> geoIpDownloader.getChunk(new InputStream() { + expectThrows(IOException.class, () -> GeoIpDownloader.getChunk(new InputStream() { @Override public int read() throws IOException { throw new IOException(); diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java index e6bba12a37484..40e54c8fe5f7e 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java @@ -129,7 +129,7 @@ public String findDeviceType(VersionedName userAgent, VersionedName os, Versione return "Other"; } - private String findMatch(List possiblePatterns, String matchString) { + private static String findMatch(List possiblePatterns, String matchString) { String name; for (DeviceTypeSubPattern pattern : possiblePatterns) { name = pattern.match(matchString); diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java index 37e54f56984b7..41ced0c7ff4cc 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java @@ -111,7 +111,7 @@ private void init(InputStream regexStream) throws IOException { } } - private 
Pattern compilePattern(String regex, String regex_flag) { + private static Pattern compilePattern(String regex, String regex_flag) { // Only flag present in the current default regexes.yaml if (regex_flag != null && regex_flag.equals("i")) { return Pattern.compile(regex, Pattern.CASE_INSENSITIVE); @@ -188,7 +188,7 @@ public Details parse(String agentString, boolean extractDeviceType) { return details; } - private VersionedName findMatch(List possiblePatterns, String agentString) { + private static VersionedName findMatch(List possiblePatterns, String agentString) { VersionedName versionedName; for (UserAgentSubpattern pattern : possiblePatterns) { versionedName = pattern.match(agentString); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java index 243487981c5fa..c6f60c48c4ab4 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java @@ -71,7 +71,7 @@ public Set> getSupportedContexts() { return Set.of(TemplateScript.CONTEXT, TemplateScript.INGEST_CONTEXT); } - private CustomMustacheFactory createMustacheFactory(Map options) { + private static CustomMustacheFactory createMustacheFactory(Map options) { if (options == null || options.isEmpty() || options.containsKey(Script.CONTENT_TYPE_OPTION) == false) { return new CustomMustacheFactory(); } diff --git a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java index a83f3b0ac6cec..c54214e5f854d 100644 --- a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java +++ b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java @@ -210,7 +210,7 @@ public PainlessInfos(List contextInfos, JavadocExtractor ex } } - private Set getCommon(List painlessContexts, Function> getter) { + private static Set getCommon(List painlessContexts, Function> getter) { Map infoCounts = new HashMap<>(); for (PainlessContextInfo contextInfo : painlessContexts) { for (T info : getter.apply(contextInfo)) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 2c286479dcf4d..e2f64d3853670 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -151,7 +151,7 @@ public Set> getSupportedContexts() { * @param The factory class. * @return A factory class that will return script instances. */ - private Type generateStatefulFactory(Loader loader, ScriptContext context, ScriptScope scriptScope) { + private static Type generateStatefulFactory(Loader loader, ScriptContext context, ScriptScope scriptScope) { int classFrames = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS; int classAccess = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL; String interfaceBase = Type.getType(context.statefulFactoryClazz).getInternalName(); @@ -271,7 +271,7 @@ private Type generateStatefulFactory(Loader loader, ScriptContext context * @param The factory class. 
* @return A factory class that will return script instances. */ - private T generateFactory(Loader loader, ScriptContext context, Type classType, ScriptScope scriptScope) { + private static T generateFactory(Loader loader, ScriptContext context, Type classType, ScriptScope scriptScope) { int classFrames = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS; int classAccess = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL; String interfaceBase = Type.getType(context.factoryClazz).getInternalName(); @@ -362,7 +362,7 @@ private T generateFactory(Loader loader, ScriptContext context, Type clas } } - private void writeNeedsMethods(Class clazz, ClassWriter writer, Set extractedVariables) { + private static void writeNeedsMethods(Class clazz, ClassWriter writer, Set extractedVariables) { for (Method method : clazz.getMethods()) { if (method.getName().startsWith("needs") && method.getReturnType().equals(boolean.class) @@ -458,7 +458,7 @@ private CompilerSettings buildCompilerSettings(Map params) { return compilerSettings; } - private ScriptException convertToScriptException(String scriptSource, Throwable t) { + private static ScriptException convertToScriptException(String scriptSource, Throwable t) { // create a script stack: this is just the script portion List scriptStack = new ArrayList<>(); ScriptException.Position pos = null; @@ -499,11 +499,11 @@ private ScriptException convertToScriptException(String scriptSource, Throwable } // very simple heuristic: +/- 25 chars. can be improved later. - private int getPreviousStatement(int offset) { + private static int getPreviousStatement(int offset) { return Math.max(0, offset - 25); } - private int getNextStatement(String scriptSource, int offset) { + private static int getNextStatement(String scriptSource, int offset) { return Math.min(scriptSource.length(), offset + 25); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java index 4ee427c04325f..8b0b57e6389fc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java @@ -213,7 +213,7 @@ public List> getGetReturns() { */ public record MethodArgument(Class clazz, String name) {} - private MethodArgument methodArgument(PainlessLookup painlessLookup, Class clazz, String argName) { + private static MethodArgument methodArgument(PainlessLookup painlessLookup, Class clazz, String argName) { Class defClass = definitionTypeForClass( painlessLookup, clazz, diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 038f0cc3c2f06..e9a3b2c1fd7f7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -195,7 +195,7 @@ private SourceContext buildAntlrTree(String sourceString) { return parser.source(); } - private void setupPicky(PainlessParser parser) { + private static void setupPicky(PainlessParser parser) { // Diagnostic listener invokes syntaxError on other listeners for ambiguity issues, parser.addErrorListener(new DiagnosticErrorListener(true)); // a second listener to fail the test when the above happens. 
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index e32caaf8407ea..bf001c5e49db9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -203,7 +203,7 @@ private boolean isValidType(Class type) { return type == def.class || classesToPainlessClassBuilders.containsKey(type); } - private Class loadClass(ClassLoader classLoader, String javaClassName, Supplier errorMessage) { + private static Class loadClass(ClassLoader classLoader, String javaClassName, Supplier errorMessage) { try { return Class.forName(javaClassName, true, classLoader); } catch (ClassNotFoundException cnfe) { @@ -1838,7 +1838,11 @@ private void generateRuntimeMethods() { } } - private void generateFilteredMethod(Class targetClass, PainlessClassBuilder painlessClassBuilder, PainlessMethod painlessMethod) { + private static void generateFilteredMethod( + Class targetClass, + PainlessClassBuilder painlessClassBuilder, + PainlessMethod painlessMethod + ) { String painlessMethodKey = buildPainlessMethodKey(painlessMethod.javaMethod().getName(), painlessMethod.typeParameters().size()); PainlessMethod filteredPainlessMethod = painlessFilteredCache.get(painlessMethod); @@ -1907,10 +1911,10 @@ private void generateFilteredMethod(Class targetClass, PainlessClassBuilder p } private void cacheRuntimeHandles() { - classesToPainlessClassBuilders.values().forEach(this::cacheRuntimeHandles); + classesToPainlessClassBuilders.values().forEach(PainlessLookupBuilder::cacheRuntimeHandles); } - private void cacheRuntimeHandles(PainlessClassBuilder painlessClassBuilder) { + private static void cacheRuntimeHandles(PainlessClassBuilder painlessClassBuilder) { for (Map.Entry painlessMethodEntry : painlessClassBuilder.methods.entrySet()) { String methodKey = painlessMethodEntry.getKey(); PainlessMethod painlessMethod = painlessMethodEntry.getValue(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java index 2097a3e2995f8..3cba26f7b2da1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java @@ -704,7 +704,7 @@ public void visitInvokeCallMember(InvokeCallMemberNode irInvokeCallMemberNode, C } } - private void replaceCallWithConstant( + private static void replaceCallWithConstant( InvokeCallMemberNode irInvokeCallMemberNode, Consumer scope, Method javaMethod, diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java index 9e95734f0978b..ddb7b060062b5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java @@ -166,7 +166,7 @@ private static ClassCastException castError(String formatText, Object... 
argumen /** * Decorates a user expression node with a PainlessCast. */ - public void decorateWithCast(AExpression userExpressionNode, SemanticScope semanticScope) { + public static void decorateWithCast(AExpression userExpressionNode, SemanticScope semanticScope) { Location location = userExpressionNode.getLocation(); Class valueType = semanticScope.getDecoration(userExpressionNode, ValueType.class).valueType(); Class targetType = semanticScope.getDecoration(userExpressionNode, TargetType.class).targetType(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java index 4739f7682ef3e..76babcdb9d26e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java @@ -479,7 +479,7 @@ protected ExpressionNode injectCast(AExpression userExpressionNode, ScriptScope * @param irStoreNode The store node if this is a write. * @return The root node for this assignment. */ - protected ExpressionNode buildLoadStore( + protected static ExpressionNode buildLoadStore( int accessDepth, Location location, boolean isNullSafe, diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java index a7b4771a1fbfc..0f77fb90b5b59 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java @@ -193,7 +193,7 @@ public void visitReturn(SReturn userReturnNode, SemanticScope semanticScope) { /** * Decorates a user expression node with a PainlessCast. 
*/ - public void decorateWithCastForReturn( + public static void decorateWithCastForReturn( AExpression userExpressionNode, AStatement parent, SemanticScope semanticScope, diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessUserTreeToIRTreePhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessUserTreeToIRTreePhase.java index c104c95dc579d..4be4d07e449f1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessUserTreeToIRTreePhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessUserTreeToIRTreePhase.java @@ -262,7 +262,7 @@ protected void injectStaticFieldsAndGetters() { // requires the gets method name be modified from "getExample" to "example" // if a get method variable isn't used it's declaration node is removed from // the ir tree permanently so there is no frivolous variable slotting - protected void injectGetsDeclarations(BlockNode irBlockNode, ScriptScope scriptScope) { + protected static void injectGetsDeclarations(BlockNode irBlockNode, ScriptScope scriptScope) { Location internalLocation = new Location("$internal$ScriptInjectionPhase$injectGetsDeclarations", 0); for (int i = 0; i < scriptScope.getScriptClassInfo().getGetMethods().size(); ++i) { @@ -345,7 +345,7 @@ protected void injectNeedsMethods(ScriptScope scriptScope) { * } * */ - protected void injectSandboxExceptions(FunctionNode irFunctionNode) { + protected static void injectSandboxExceptions(FunctionNode irFunctionNode) { try { Location internalLocation = new Location("$internal$ScriptInjectionPhase$injectSandboxExceptions", 0); BlockNode irBlockNode = irFunctionNode.getBlockNode(); @@ -513,7 +513,7 @@ public void visitReturn(SReturn userReturnNode, ScriptScope scriptScope) { injectConverter(userReturnNode, scriptScope); } - public void injectConverter(AStatement userStatementNode, ScriptScope scriptScope) { + public static void injectConverter(AStatement userStatementNode, ScriptScope scriptScope) { Converter converter = scriptScope.getDecoration(userStatementNode, Converter.class); if (converter == null) { return; diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/GeoShapeType.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/GeoShapeType.java index fb72ee793d399..0bef69ca26a6b 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/GeoShapeType.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/GeoShapeType.java @@ -167,7 +167,7 @@ public PolygonBuilder getBuilder( return polygon; } - void validateLinearRing(CoordinateNode coordinates, boolean coerce) { + static void validateLinearRing(CoordinateNode coordinates, boolean coerce) { if (coordinates.children == null || coordinates.children.isEmpty()) { String error = "Invalid LinearRing found."; error += (coordinates.coordinate == null) diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java index 46cc6a131415f..beef840756e78 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java @@ -166,7 +166,7 @@ private static void validateLinearRing(LineStringBuilder lineString) { /** * Validates only 1 vertex is tangential (shared) between the interior and exterior of a polygon */ 
- protected void validateHole(LineStringBuilder shell, LineStringBuilder hole) { + protected static void validateHole(LineStringBuilder shell, LineStringBuilder hole) { HashSet<Coordinate> exterior = Sets.newHashSet(shell.coordinates); HashSet<Coordinate> interior = Sets.newHashSet(hole.coordinates); exterior.retainAll(interior); diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/ShapeBuilder.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/ShapeBuilder.java index 0ca7d20c6678d..31d610fbdc57a 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/ShapeBuilder.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/ShapeBuilder.java @@ -176,7 +176,7 @@ protected Coordinate[] coordinates(boolean closed) { return result; } - protected JtsGeometry jtsGeometry(Geometry geom) { + protected static JtsGeometry jtsGeometry(Geometry geom) { // dateline180Check is false because Elasticsearch does its own dateline wrapping JtsGeometry jtsGeometry = new JtsGeometry(geom, SPATIAL_CONTEXT, false, MULTI_POLYGON_MAY_OVERLAP); if (AUTO_VALIDATE_JTS_GEOMETRY) jtsGeometry.validate(); diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 9b16a08d4ba2f..46860ff38b8ca 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -294,7 +294,7 @@ private void setupFieldTypeDeprecatedParameters(GeoShapeFieldType ft) { } } - private void setupPrefixTrees(GeoShapeFieldType ft) { + private static void setupPrefixTrees(GeoShapeFieldType ft) { SpatialPrefixTree prefixTree; if (ft.tree().equals(PrefixTrees.GEOHASH)) { prefixTree = new GeohashPrefixTree( diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java index 285e58ec87fc5..4c738ddcffce1 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapperTests.java @@ -101,7 +101,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { ); } - private XContentBuilder testMapping(boolean syntheticSource) throws IOException { + private static XContentBuilder testMapping(boolean syntheticSource) throws IOException { if (syntheticSource) { return syntheticSourceMapping(b -> b.startObject("field").field("type", "match_only_text").endObject()); } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java index b95d36d2e6a01..88124504faade 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapperTests.java @@ -279,7 +279,8 @@ public void testMultiFields() throws IOException { } } - private void assertMultiField(int shingleSize, MapperService mapperService, String
suggestPath, String textPath) throws IOException { + private static void assertMultiField(int shingleSize, MapperService mapperService, String suggestPath, String textPath) + throws IOException { List<String> fields = new ArrayList<>(); fields.add(suggestPath); fields.add(textPath); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapperTests.java index dd48a8d4996b1..1636def53536b 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapperTests.java @@ -165,7 +165,7 @@ private DocumentMapper createIndexWithTokenCountField() throws IOException { })); } - private SourceToParse createDocument(String fieldValue) throws Exception { + private static SourceToParse createDocument(String fieldValue) throws Exception { return source(b -> b.field("test", fieldValue)); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java index 6a644cdcdc5ca..95db4d92874a4 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java @@ -156,7 +156,7 @@ public TopDocsAndMaxScore topDocs(SearchHit hit) throws IOException { } } - private String getSortedDocValue(String field, SearchContext context, int docId) { + private static String getSortedDocValue(String field, SearchContext context, int docId) { try { List<LeafReaderContext> ctxs = context.searcher().getIndexReader().leaves(); LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(docId, ctxs)); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index d2e30ed1d420d..016d9d3f75a21 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -327,7 +327,7 @@ Tuple<BooleanQuery, Boolean> createCandidateQuery(IndexReader indexReader) throw // This was extracted from the method above, because otherwise it is difficult to test what terms are included in // the query in case a CoveringQuery is used (it does not have a getter to retrieve the clauses) - Tuple<List<BytesRef>, Map<String, List<byte[]>>> extractTermsAndRanges(IndexReader indexReader) throws IOException { + static Tuple<List<BytesRef>, Map<String, List<byte[]>>> extractTermsAndRanges(IndexReader indexReader) throws IOException { List<BytesRef> extractedTerms = new ArrayList<>(); Map<String, List<byte[]>> encodedPointValuesByField = new HashMap<>(); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 6a77f6c97b0f4..5a12e0c9f3a37 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -367,7 +367,7 @@ public void testExtractTermsAndRanges() throws Exception { IndexReader indexReader = memoryIndex.createSearcher().getIndexReader(); - Tuple<List<BytesRef>, Map<String, List<byte[]>>> t = fieldType.extractTermsAndRanges(indexReader); + Tuple<List<BytesRef>, Map<String, List<byte[]>>> t = PercolatorFieldMapper.PercolatorFieldType.extractTermsAndRanges(indexReader); assertEquals(1, t.v2().size()); Map<String, List<byte[]>> rangesMap = t.v2(); assertEquals(1, rangesMap.size()); @@ -449,7 +449,7 @@ public void testExtractTermsAndRanges_numberFields() throws Exception { IndexReader indexReader = memoryIndex.createSearcher().getIndexReader(); - Tuple<List<BytesRef>, Map<String, List<byte[]>>> t = fieldType.extractTermsAndRanges(indexReader); + Tuple<List<BytesRef>, Map<String, List<byte[]>>> t = PercolatorFieldMapper.PercolatorFieldType.extractTermsAndRanges(indexReader); assertEquals(0, t.v1().size()); Map<String, List<byte[]>> rangesMap = t.v2(); assertEquals(7, rangesMap.size()); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java index 95fc3060a6af0..952dd0585e7ba 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBaseReindexRestHandler.java @@ -107,7 +107,7 @@ protected Request setCommonOptions(RestRequest restRequest, Request request) { return request; } - private RestChannelConsumer sendTask(String localNodeId, Task task) { + private static RestChannelConsumer sendTask(String localNodeId, Task task) { return channel -> { try (XContentBuilder builder = channel.newBuilder()) { builder.startObject(); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java index 2f7d3c5de5202..7502ab748f7fe 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractBulkByQueryRestHandler.java @@ -76,7 +76,7 @@ protected void parseInternalRequest( * should get better when SearchRequest has full ObjectParser support * then we can delegate and stuff. */ - private XContentParser extractRequestSpecificFields(RestRequest restRequest, Map<String, Consumer<Object>> bodyConsumers) + private static XContentParser extractRequestSpecificFields(RestRequest restRequest, Map<String, Consumer<Object>> bodyConsumers) throws IOException { if (restRequest.hasContentOrSourceParam() == false) { return null; // body is optional diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkIndexByScrollResponseContentListener.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkIndexByScrollResponseContentListener.java index d1f9bdf7e147d..2c597dba6e22f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkIndexByScrollResponseContentListener.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/BulkIndexByScrollResponseContentListener.java @@ -40,7 +40,7 @@ public RestResponse buildResponse(BulkByScrollResponse response, XContentBuilder return new RestResponse(getStatus(response), builder); } - private RestStatus getStatus(BulkByScrollResponse response) { + private static RestStatus getStatus(BulkByScrollResponse response) { /* * Return the highest numbered rest status under the assumption that higher numbered statuses are "more error" and thus more * interesting to the user.
diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index d5e510dd5288a..91ac9996f97ca 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -122,9 +122,9 @@ public AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service, (httpMethod, url) -> httpMethod.equals("GET") && isListRequest(httpMethod, url) == false, stats.getOperations::incrementAndGet ), - RequestStatsCollector.create(this::isListRequest, stats.listOperations::incrementAndGet), - RequestStatsCollector.create(this::isPutBlockRequest, stats.putBlockOperations::incrementAndGet), - RequestStatsCollector.create(this::isPutBlockListRequest, stats.putBlockListOperations::incrementAndGet), + RequestStatsCollector.create(AzureBlobStore::isListRequest, stats.listOperations::incrementAndGet), + RequestStatsCollector.create(AzureBlobStore::isPutBlockRequest, stats.putBlockOperations::incrementAndGet), + RequestStatsCollector.create(AzureBlobStore::isPutBlockListRequest, stats.putBlockListOperations::incrementAndGet), RequestStatsCollector.create( // https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#uri-parameters // The only URI parameter allowed for put-blob operation is "timeout", but if a sas token is used, @@ -158,18 +158,18 @@ && isPutBlockListRequest(httpMethod, url) == false, }; } - private boolean isListRequest(String httpMethod, URL url) { + private static boolean isListRequest(String httpMethod, URL url) { return httpMethod.equals("GET") && url.getQuery() != null && url.getQuery().contains("comp=list"); } // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block - private boolean isPutBlockRequest(String httpMethod, URL url) { + private static boolean isPutBlockRequest(String httpMethod, URL url) { String queryParams = url.getQuery() == null ? "" : url.getQuery(); return httpMethod.equals("PUT") && queryParams.contains("comp=block") && queryParams.contains("blockid="); } // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list - private boolean isPutBlockListRequest(String httpMethod, URL url) { + private static boolean isPutBlockListRequest(String httpMethod, URL url) { String queryParams = url.getQuery() == null ? 
"" : url.getQuery(); return httpMethod.equals("PUT") && queryParams.contains("comp=blocklist"); } @@ -504,7 +504,7 @@ private void executeMultipartUpload(String blobName, InputStream inputStream, lo private static final Base64.Encoder base64Encoder = Base64.getEncoder().withoutPadding(); private static final Base64.Decoder base64UrlDecoder = Base64.getUrlDecoder(); - private String makeMultipartBlockId() { + private static String makeMultipartBlockId() { return base64Encoder.encodeToString(base64UrlDecoder.decode(UUIDs.base64UUID())); } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index fca005e8de32c..911d59aa52dcb 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -171,7 +171,7 @@ public final void collectMetrics(Request request, Response response) { protected abstract void collectMetrics(Request request); } - private long getRequestCount(Request request) { + private static long getRequestCount(Request request) { Number requestCount = request.getAWSRequestMetrics().getTimingInfo().getCounter(AWSRequestMetrics.Field.RequestCount.name()); if (requestCount == null) { logger.warn("Expected request count to be tracked for request [{}] but found not count.", request); diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java index 580348564b07e..42bece3dbea16 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java @@ -180,7 +180,7 @@ private String parseBodyAsString(CloseableHttpResponse response, int maxSize) { return errorMessage; } - private Charset getCharset(HttpEntity httpEntity) { + private static Charset getCharset(HttpEntity httpEntity) { final Header contentType = httpEntity.getContentType(); if (contentType == null) { return StandardCharsets.UTF_8; @@ -195,14 +195,14 @@ private Charset getCharset(HttpEntity httpEntity) { return StandardCharsets.UTF_8; } - private boolean isValidContentTypeToParseError(HttpEntity httpEntity) { + private static boolean isValidContentTypeToParseError(HttpEntity httpEntity) { Header contentType = httpEntity.getContentType(); return contentType != null && httpEntity.getContentLength() > 0 && (contentType.getValue().startsWith("text/") || contentType.getValue().startsWith("application/")); } - private boolean isSuccessful(int statusCode) { + private static boolean isSuccessful(int statusCode) { return statusCode / 100 == RestStatus.OK.getStatus() / 100; } diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java index 18ac1d6d9641b..e869ebf20f59e 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/IcuTokenizerFactory.java @@ -51,7 +51,7 @@ public Tokenizer create() { } } - private ICUTokenizerConfig getIcuConfig(Environment env, Settings settings) { + private static 
ICUTokenizerConfig getIcuConfig(Environment env, Settings settings) { Map tailored = new HashMap<>(); try { @@ -96,7 +96,7 @@ public RuleBasedBreakIterator getBreakIterator(int script) { } // parse a single RBBi rule file - private BreakIterator parseRules(String filename, Environment env) throws IOException { + private static BreakIterator parseRules(String filename, Environment env) throws IOException { final Path path = env.configFile().resolve(filename); String rules = Files.readAllLines(path).stream().filter((v) -> v.startsWith("#") == false).collect(Collectors.joining("\n")); diff --git a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/KoelnerPhonetik.java b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/KoelnerPhonetik.java index aed1a8cfec19a..4fc6cde6e1244 100644 --- a/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/KoelnerPhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/elasticsearch/plugin/analysis/phonetic/KoelnerPhonetik.java @@ -261,11 +261,11 @@ private String substitute(String str) { return s; } - private String expandUmlauts(String str) { + private static String expandUmlauts(String str) { return str.replaceAll("\u00C4", "AE").replaceAll("\u00D6", "OE").replaceAll("\u00DC", "UE"); } - private String removeSequences(String str) { + private static String removeSequences(String str) { if (str == null || str.length() == 0) { return ""; } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java index c55eb8ff7f471..5c8a2a8fb92f9 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java @@ -80,7 +80,7 @@ private enum Ec2HostnameType { * @see CustomNameResolver#resolveIfPossible(String) */ @SuppressForbidden(reason = "We call getInputStream in doPrivileged and provide SocketPermission") - public InetAddress[] resolve(Ec2HostnameType type) throws IOException { + public static InetAddress[] resolve(Ec2HostnameType type) throws IOException { InputStream in = null; String metadataUrl = EC2MetadataUtils.getHostAddressForEC2MetadataService() + "/latest/meta-data/" + type.ec2Name; String metadataTokenUrl = EC2MetadataUtils.getHostAddressForEC2MetadataService() + "/latest/api/token"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java index 7d9e1f3955089..b9cc8ef8ac515 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -441,7 +441,7 @@ public void assertLinearizable() { if (history.size() > 300) { scheduler.schedule(() -> abort.set(true), 10, TimeUnit.SECONDS); } - linearizable = new LinearizabilityChecker().isLinearizable(spec, history, missingResponseGenerator(), abort::get); + linearizable = LinearizabilityChecker.isLinearizable(spec, history, missingResponseGenerator(), abort::get); ThreadPool.terminate(scheduler, 1, TimeUnit.SECONDS); if (abort.get() && linearizable == false) { linearizable = true; // let the test pass @@ -690,11 +690,7 @@ private static void 
runLinearizabilityChecker(FileInputStream fileInputStream, l LinearizabilityChecker.History history = readHistory(is); Version initialVersion = new Version(primaryTerm, seqNo); - boolean result = new LinearizabilityChecker().isLinearizable( - new CASSequentialSpec(initialVersion), - history, - missingResponseGenerator() - ); + boolean result = LinearizabilityChecker.isLinearizable(new CASSequentialSpec(initialVersion), history, missingResponseGenerator()); System.out.println(LinearizabilityChecker.visualize(new CASSequentialSpec(initialVersion), history, missingResponseGenerator())); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java index fce38ab63c302..901f8b1e83c69 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java @@ -243,7 +243,7 @@ public void onFailure(Exception e) { } } - private NodesRemovalPrevalidation createPrevalidationResult(DiscoveryNodes nodes, PrevalidateShardPathResponse response) { + private static NodesRemovalPrevalidation createPrevalidationResult(DiscoveryNodes nodes, PrevalidateShardPathResponse response) { List nodeResults = new ArrayList<>(response.getNodes().size() + response.failures().size()); for (NodePrevalidateShardPathResponse nodeResponse : response.getNodes()) { Result result; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index ba9ba10a80d0d..7111f64d050de 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -82,7 +82,7 @@ public void writeTo(StreamOutput out) throws IOException { RoutingExplanations.writeTo(explanations, out); } - private boolean emitState(ToXContent.Params params) { + private static boolean emitState(ToXContent.Params params) { return Objects.equals(params.param("metric"), "none") == false; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java index 7f3e058916572..dbb6b5994867f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/reservedstate/ReservedComposableIndexTemplateAction.java @@ -178,7 +178,7 @@ public TransformState transform(Object source, TransformState prevState) throws // 4. 
validate for v2 composable template overlaps for (var request : composables) { - indexTemplateService.v2TemplateOverlaps(state, request.name(), request.indexTemplate(), true); + MetadataIndexTemplateService.v2TemplateOverlaps(state, request.name(), request.indexTemplate(), true); } Set componentEntities = components.stream().map(r -> reservedComponentName(r.name())).collect(Collectors.toSet()); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java index 4e796a638f480..aca9bb81fb53f 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java @@ -68,7 +68,7 @@ private Collection prepare(List requests return requests; } - private ClusterState wrapIngestTaskExecute(IngestService.PipelineClusterStateUpdateTask task, ClusterState state) { + private static ClusterState wrapIngestTaskExecute(IngestService.PipelineClusterStateUpdateTask task, ClusterState state) { final var allIndexMetadata = state.metadata().indices().values(); final IngestMetadata currentIndexMetadata = state.metadata().custom(IngestMetadata.TYPE); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java index cefcd4bfb511e..8fd5e40dab1d2 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -60,7 +60,7 @@ void onNonFatalUncaught(final String threadName, final Throwable t) { logErrorMessage(t, message); } - private void logErrorMessage(Throwable t, String message) { + private static void logErrorMessage(Throwable t, String message) { AccessController.doPrivileged((PrivilegedAction) () -> { logger.error(message, t); return null; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java index 1c2f120c6c925..fb5c9f2fea7de 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java @@ -249,7 +249,7 @@ private CoordinationDiagnosticsResult diagnoseOnHaveSeenMasterRecently(MasterHis * @param verbose Whether to calculate and include the details in the result * @return The CoordinationDiagnosticsResult for the given localMasterHistory */ - private CoordinationDiagnosticsResult diagnoseOnMasterHasChangedIdentity( + private static CoordinationDiagnosticsResult diagnoseOnMasterHasChangedIdentity( MasterHistory localMasterHistory, int masterChanges, boolean verbose @@ -352,7 +352,7 @@ private CoordinationDiagnosticsResult diagnoseOnMasterHasFlappedNull(MasterHisto * Returns a CoordinationDiagnosticsResult for the case when the master is seen as stable * @return A CoordinationDiagnosticsResult for the case when the master is seen as stable (GREEN status, no impacts or details) */ - private CoordinationDiagnosticsResult getMasterIsStableResult(boolean verbose, MasterHistory localMasterHistory) { + private static CoordinationDiagnosticsResult getMasterIsStableResult(boolean verbose, MasterHistory localMasterHistory) { 
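// A minimal before/after sketch of the conversion applied throughout this patch, using a
// hypothetical helper that is not part of the original change: a private method that reads
// no instance state can take the static modifier, so it no longer captures the enclosing
// instance.
//
//     private boolean isEmpty(String s) { return s == null || s.isEmpty(); }          // before
//     private static boolean isEmpty(String s) { return s == null || s.isEmpty(); }   // after
//
// Call sites then name the class instead of an instance, as in the change from
// checker.isLinearizable(...) to LinearizabilityChecker.isLinearizable(...) later in this patch.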
String summary = "The cluster has a stable master node"; logger.trace("The cluster has a stable master node"); CoordinationDiagnosticsDetails details = getDetails(verbose, localMasterHistory, null, null); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java index 5dea832cc6ad3..56289ab348a3a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java @@ -219,7 +219,7 @@ private String getNameForNodeId(String nodeId) { * @param explain If true, the returned list includes a UserAction to contact support, otherwise an empty list * @return a single UserAction instructing users to contact support. */ - private List getContactSupportUserActions(boolean explain) { + private static List getContactSupportUserActions(boolean explain) { if (explain) { return List.of(CONTACT_SUPPORT); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 5415454a10486..cf63602729bb4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -643,7 +643,7 @@ public ClusterState addIndexTemplateV2( * @param validate should we throw {@link IllegalArgumentException} if conflicts are found or just compute them * @return a map of v2 template names to their index patterns for v2 templates that would overlap with the given template */ - public Map> v2TemplateOverlaps( + public static Map> v2TemplateOverlaps( ClusterState currentState, String name, final ComposableIndexTemplate template, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeService.java index e62bdbe01a8ee..1cdfbd40e5214 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeService.java @@ -107,7 +107,7 @@ boolean requiresUpdate(IndexMetadata indexMetadata) { } // package-private for testing - boolean isVisible(IndexMetadata indexMetadata) { + static boolean isVisible(IndexMetadata indexMetadata) { return indexMetadata.getSettings().getAsBoolean(IndexMetadata.SETTING_INDEX_HIDDEN, false) == false; } @@ -118,7 +118,7 @@ boolean shouldBeSystem(IndexMetadata indexMetadata) { } // package-private for testing - boolean hasVisibleAlias(IndexMetadata indexMetadata) { + static boolean hasVisibleAlias(IndexMetadata indexMetadata) { return indexMetadata.getAliases().values().stream().anyMatch(a -> Boolean.FALSE.equals(a.isHidden())); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java index c454a0daca8be..5f477a9ca66df 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -426,7 +426,7 @@ 
private void setFrozenFloodStageMaxHeadroom(ByteSizeValue maxHeadroom) { this.frozenFloodStageMaxHeadroom = maxHeadroom; } - private ByteSizeValue getFreeBytesThreshold(ByteSizeValue total, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom) { + private static ByteSizeValue getFreeBytesThreshold(ByteSizeValue total, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom) { // If bytes are given, they can be readily returned as free bytes. If percentages are given, we need to calculate the free bytes. if (watermark.isAbsolute()) { return watermark.getAbsolute(); @@ -450,7 +450,7 @@ public ByteSizeValue getFreeBytesThresholdFrozenFloodStage(ByteSizeValue total) return getFreeBytesThreshold(total, frozenFloodStageWatermark, frozenFloodStageMaxHeadroom); } - private ByteSizeValue getMinimumTotalSizeForBelowWatermark( + private static ByteSizeValue getMinimumTotalSizeForBelowWatermark( ByteSizeValue used, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom @@ -490,7 +490,7 @@ public TimeValue getRerouteInterval() { return rerouteInterval; } - private String describeThreshold( + private static String describeThreshold( ByteSizeValue total, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java index 9d41dd86d2ceb..15710da073c8e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java @@ -725,7 +725,7 @@ private List checkDataTierAtShardLimit( } } - private List checkDataTierShouldMigrate( + private static List checkDataTierShouldMigrate( IndexMetadata indexMetadata, List dataTierAllocationResults, @Nullable String preferredTier, @@ -770,7 +770,7 @@ private List checkDataTierShouldMigrate( } } - private Optional checkNotEnoughNodesInDataTier( + private static Optional checkNotEnoughNodesInDataTier( List dataTierAllocationResults, @Nullable String preferredTier ) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 1a0c6453e815a..ee95074b8a730 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -313,7 +313,9 @@ public ClusterState execute(BatchExecutionContext b return newState; } - private TaskContext findLatest(List> taskContexts) { + private static TaskContext findLatest( + List> taskContexts + ) { return taskContexts.stream().max(Comparator.comparing(context -> context.getTask().desiredBalance.lastConvergedIndex())).get(); } @@ -331,7 +333,7 @@ private ClusterState applyBalance( } } - private void discardSupersededTasks( + private static void discardSupersededTasks( List> taskContexts, TaskContext latest ) { diff --git a/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java b/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java index 2f117d8f097f4..2fc6a1ce6320a 100644 --- 
a/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java @@ -54,7 +54,7 @@ protected void execute(Terminal terminal, OptionSet options, ProcessInfo process execute(terminal, options, createEnv(options, processInfo), processInfo); } - private void putDockerEnvSettings(Map settings, Map envVars) { + private static void putDockerEnvSettings(Map settings, Map envVars) { for (var envVar : envVars.entrySet()) { String key = envVar.getKey(); if (DOCKER_LOWERCASE_SETTING_REGEX.matcher(key).matches()) { diff --git a/server/src/main/java/org/elasticsearch/common/logging/JULBridge.java b/server/src/main/java/org/elasticsearch/common/logging/JULBridge.java index 64915acaee031..91026ff38ab1f 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/JULBridge.java +++ b/server/src/main/java/org/elasticsearch/common/logging/JULBridge.java @@ -78,7 +78,7 @@ public void publish(LogRecord record) { } } - private Level translateJulLevel(java.util.logging.Level julLevel) { + private static Level translateJulLevel(java.util.logging.Level julLevel) { Level esLevel = levelMap.get(julLevel); if (esLevel != null) { return esLevel; diff --git a/server/src/main/java/org/elasticsearch/common/logging/internal/LoggerImpl.java b/server/src/main/java/org/elasticsearch/common/logging/internal/LoggerImpl.java index df9222453d862..7a41b469b1d45 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/internal/LoggerImpl.java +++ b/server/src/main/java/org/elasticsearch/common/logging/internal/LoggerImpl.java @@ -22,7 +22,7 @@ public LoggerImpl(org.apache.logging.log4j.Logger log4jLogger) { this.log4jLogger = log4jLogger; } - private org.apache.logging.log4j.util.Supplier mapSupplier(Supplier msgSupplier) { + private static org.apache.logging.log4j.util.Supplier mapSupplier(Supplier msgSupplier) { return () -> msgSupplier.get(); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index c0ad251e7c2c7..2f2e4367e1930 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -193,7 +193,7 @@ private void registerSetting(Setting setting) { // TODO: remove this hack once we remove the deprecated ability to use repository settings in the cluster state in the S3 snapshot // module - private boolean isS3InsecureCredentials(Setting setting) { + private static boolean isS3InsecureCredentials(Setting setting) { final String settingKey = setting.getKey(); return settingKey.equals("access_key") || settingKey.equals("secret_key"); } diff --git a/server/src/main/java/org/elasticsearch/health/HealthService.java b/server/src/main/java/org/elasticsearch/health/HealthService.java index f4b71247d6291..8f6460e0a5915 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthService.java +++ b/server/src/main/java/org/elasticsearch/health/HealthService.java @@ -182,7 +182,7 @@ private ActionRunnable> calculateFilteredIndicatorsR * @param results The results that the listener will be notified of, if they pass validation * @param listener A listener to be notified of results */ - private void validateResultsAndNotifyListener( + private static void validateResultsAndNotifyListener( @Nullable String indicatorName, List results, ActionListener> listener @@ -203,7 +203,7 @@ assert 
findDuplicatesByName(results).isEmpty() * @param computeDetails If details should be calculated on which indicators are causing the UNKNOWN state. * @return Details explaining why results are UNKNOWN, or an empty detail set if computeDetails is false. */ - private HealthIndicatorDetails healthUnknownReason(List preflightResults, boolean computeDetails) { + private static HealthIndicatorDetails healthUnknownReason(List preflightResults, boolean computeDetails) { assert preflightResults.isEmpty() == false : "Requires at least one non-GREEN preflight result"; HealthIndicatorDetails unknownDetails; if (computeDetails) { @@ -226,7 +226,7 @@ private HealthIndicatorDetails healthUnknownReason(List p * @param details the details to include on the result * @return A result with the UNKNOWN status */ - private HealthIndicatorResult generateUnknownResult( + private static HealthIndicatorResult generateUnknownResult( HealthIndicatorService indicatorService, String summary, HealthIndicatorDetails details diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index 6577171202169..b5a334e56e94c 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -293,7 +293,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private ByteSizeValue getFreeBytes(ByteSizeValue total, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom) { + private static ByteSizeValue getFreeBytes(ByteSizeValue total, RelativeByteSizeValue watermark, ByteSizeValue maxHeadroom) { if (watermark.isAbsolute()) { return watermark.getAbsolute(); } @@ -312,7 +312,7 @@ public ByteSizeValue getFreeBytesFrozenFloodStageWatermark(ByteSizeValue total) return getFreeBytes(total, frozenFloodStageWatermark, frozenFloodStageMaxHeadroom); } - private String getThresholdStringRep(RelativeByteSizeValue relativeByteSizeValue) { + private static String getThresholdStringRep(RelativeByteSizeValue relativeByteSizeValue) { if (relativeByteSizeValue.isAbsolute()) { return relativeByteSizeValue.getAbsolute().getStringRep(); } else { diff --git a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java index 8dabc6da9dce4..925dae7136914 100644 --- a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java @@ -116,7 +116,7 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources * not ordinarily important, but could be useful in tracking down problems where nodes have stopped reporting health node information. * @param diskHealthInfoMap A map of nodeId to DiskHealthInfo */ - private void logNodesMissingHealthInfo(Map diskHealthInfoMap, ClusterState clusterState) { + private static void logNodesMissingHealthInfo(Map diskHealthInfoMap, ClusterState clusterState) { if (logger.isDebugEnabled()) { String nodesMissingHealthInfo = getSortedUniqueValuesString( clusterState.getNodes().getAllNodes(), @@ -492,7 +492,7 @@ static Diagnosis createNonDataNodeDiagnosis(HealthStatus healthStatus, List> nodes) { + private static int getUnhealthyNodeSize(Map> nodes) { return (nodes.containsKey(HealthStatus.RED) ?
nodes.get(HealthStatus.RED).size() : 0) + (nodes.containsKey(HealthStatus.YELLOW) ? nodes.get(HealthStatus.YELLOW).size() : 0); diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index 8d25233a1d6e5..709ac83d65553 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -212,7 +212,7 @@ public void clusterChanged(ClusterChangedEvent event) { } } - private boolean hasMasterNodeChanged(DiscoveryNode currentMasterNode, ClusterChangedEvent event) { + private static boolean hasMasterNodeChanged(DiscoveryNode currentMasterNode, ClusterChangedEvent event) { DiscoveryNode previousMasterNode = event.previousState().nodes().getMasterNode(); if (currentMasterNode == null || previousMasterNode == null) { return currentMasterNode != previousMasterNode; diff --git a/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java b/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java index 35a5374551da7..18c009deb8840 100644 --- a/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java +++ b/server/src/main/java/org/elasticsearch/health/node/action/TransportHealthNodeAction.java @@ -152,7 +152,7 @@ public void handleException(final TransportException exception) { } } - private boolean isTaskCancelled(Task task) { + private static boolean isTaskCancelled(Task task) { return (task instanceof CancellableTask t) && t.isCancelled(); } } diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java index c52f69d232c87..53941937cbf3c 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java @@ -185,7 +185,7 @@ void abortTaskIfApplicable(String reason) { } } - private boolean isNodeShuttingDown(ClusterChangedEvent event, String nodeId) { + private static boolean isNodeShuttingDown(ClusterChangedEvent event, String nodeId) { return event.previousState().metadata().nodeShutdowns().contains(nodeId) == false && event.state().metadata().nodeShutdowns().contains(nodeId); } diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index c96c0f149a972..8b087f5a302db 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -244,7 +244,7 @@ public void onStoreClosed(ShardId shardId) { } } - private void callListeners( + private static void callListeners( IndexShard indexShard, Iterator>> iterator, ActionListener outerListener diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java index 0e71fdca06b0d..df1aca3dc7b53 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java @@ -128,11 +128,11 @@ private boolean isCounterOrGaugeMetricType(String field) { return false; } - private boolean 
isTimestampField(String field) { + private static boolean isTimestampField(String field) { return "@timestamp".equals(field); } - private boolean isNotSpecialField(String field) { + private static boolean isNotSpecialField(String field) { return field.startsWith("_") == false; } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java index b4c0f70ad8c99..e3877e65581f2 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java @@ -217,20 +217,20 @@ void decode(DataInput in, long[] out) throws IOException { } // this loop should auto-vectorize - private void mul(long[] arr, long m) { + private static void mul(long[] arr, long m) { for (int i = 0; i < ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE; ++i) { arr[i] *= m; } } // this loop should auto-vectorize - private void add(long[] arr, long min) { + private static void add(long[] arr, long min) { for (int i = 0; i < ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE; ++i) { arr[i] += min; } } - private void deltaDecode(long[] arr) { + private static void deltaDecode(long[] arr) { for (int i = 1; i < ES87TSDBDocValuesFormat.NUMERIC_BLOCK_SIZE; ++i) { arr[i] += arr[i - 1]; } diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java index 174c3fbc9f0b9..4c691d84e2b4d 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java @@ -158,13 +158,13 @@ private void readFields(IndexInput meta, FieldInfos infos) throws IOException { } } - private NumericEntry readNumeric(IndexInput meta) throws IOException { + private static NumericEntry readNumeric(IndexInput meta) throws IOException { NumericEntry entry = new NumericEntry(); readNumeric(meta, entry); return entry; } - private void readNumeric(IndexInput meta, NumericEntry entry) throws IOException { + private static void readNumeric(IndexInput meta, NumericEntry entry) throws IOException { entry.docsWithFieldOffset = meta.readLong(); entry.docsWithFieldLength = meta.readLong(); entry.jumpTableEntryCount = meta.readShort(); @@ -184,13 +184,13 @@ private void readNumeric(IndexInput meta, NumericEntry entry) throws IOException } } - private SortedNumericEntry readSortedNumeric(IndexInput meta) throws IOException { + private static SortedNumericEntry readSortedNumeric(IndexInput meta) throws IOException { SortedNumericEntry entry = new SortedNumericEntry(); readSortedNumeric(meta, entry); return entry; } - private SortedNumericEntry readSortedNumeric(IndexInput meta, SortedNumericEntry entry) throws IOException { + private static SortedNumericEntry readSortedNumeric(IndexInput meta, SortedNumericEntry entry) throws IOException { readNumeric(meta, entry); entry.numDocsWithField = meta.readInt(); if (entry.numDocsWithField != entry.numValues) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 6f5716c880313..1a1e470519213 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ 
b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -271,7 +271,7 @@ private static int indexOfKeptCommits(List commits, long return 0; } - private Set listOfNewFileNames(IndexCommit previous, IndexCommit current) throws IOException { + private static Set listOfNewFileNames(IndexCommit previous, IndexCommit current) throws IOException { final Set previousFiles = previous != null ? new HashSet<>(previous.getFileNames()) : Set.of(); return current.getFileNames().stream().filter(f -> previousFiles.contains(f) == false).collect(Collectors.toUnmodifiableSet()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 4cc5e41d86f0c..f97817570838c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -443,7 +443,7 @@ public Query geoShapeQuery(SearchExecutionContext context, String fieldName, Sha return query; } - private boolean isPointGeometry(LatLonGeometry[] geometries) { + private static boolean isPointGeometry(LatLonGeometry[] geometries) { return geometries.length == 1 && geometries[0] instanceof org.apache.lucene.geo.Point; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java index 691d0a3f6c4d0..f9e2c55e5085a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValues.java @@ -145,7 +145,7 @@ public void write(XContentBuilder b) throws IOException { values = emptyList(); } - private void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOException { + private static void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOException { switch (r.bytes[r.offset]) { case 'b': b.value(r.bytes, r.offset + 1, r.length - 1); @@ -189,7 +189,7 @@ private void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOException { } } - private void decodeAndWriteXContent(XContentBuilder b, XContentType type, BytesRef r) throws IOException { + private static void decodeAndWriteXContent(XContentBuilder b, XContentType type, BytesRef r) throws IOException { BytesReference ref = new BytesArray(r.bytes, r.offset + 1, r.length - 1); try (XContentParser parser = type.xContent().createParser(XContentParserConfiguration.EMPTY, ref.streamInput())) { b.copyCurrentStructure(parser); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 07dab213c13bb..70f4d2d901b45 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -382,7 +382,7 @@ private TextFieldType buildFieldType( return ft; } - private KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate(FieldType fieldType, MultiFields multiFields) { + private static KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate(FieldType fieldType, MultiFields multiFields) { if (fieldType.stored()) { return null; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java index 7dc32901be0e0..a79b37796bbe9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldSyntheticWriterHelper.java @@ -252,7 +252,7 @@ void write(final XContentBuilder b) throws IOException { } } - private void writeObject( + private static void writeObject( final XContentBuilder b, final KeyValue currKeyValue, final KeyValue nextKeyValue, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 06c0a8f6d494b..deb178ff724bb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -591,7 +591,7 @@ void checkNanAndInfinite(float[] vector) { } } - StringBuilder appendErrorElements(StringBuilder errorBuilder, float[] vector) { + static StringBuilder appendErrorElements(StringBuilder errorBuilder, float[] vector) { // Include the first five elements of the invalid vector in the error message errorBuilder.append(" Preview of invalid vector: ["); for (int i = 0; i < Math.min(5, vector.length); i++) { @@ -607,7 +607,7 @@ StringBuilder appendErrorElements(StringBuilder errorBuilder, float[] vector) { return errorBuilder; } - StringBuilder appendErrorElements(StringBuilder errorBuilder, byte[] vector) { + static StringBuilder appendErrorElements(StringBuilder errorBuilder, byte[] vector) { // Include the first five elements of the invalid vector in the error message errorBuilder.append(" Preview of invalid vector: ["); for (int i = 0; i < Math.min(5, vector.length); i++) { @@ -623,11 +623,11 @@ StringBuilder appendErrorElements(StringBuilder errorBuilder, byte[] vector) { return errorBuilder; } - Function errorFloatElementsAppender(float[] vector) { + static Function errorFloatElementsAppender(float[] vector) { return sb -> appendErrorElements(sb, vector); } - Function errorByteElementsAppender(byte[] vector) { + static Function errorByteElementsAppender(byte[] vector) { return sb -> appendErrorElements(sb, vector); } } @@ -851,7 +851,7 @@ public Query createKnnQuery( if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); - elementType.checkVectorMagnitude(similarity, elementType.errorByteElementsAppender(queryVector), squaredMagnitude); + elementType.checkVectorMagnitude(similarity, ElementType.errorByteElementsAppender(queryVector), squaredMagnitude); } Query knnQuery = parentFilter != null ? 
new DiversifyingChildrenByteKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) @@ -888,7 +888,7 @@ public Query createKnnQuery( if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); - elementType.checkVectorMagnitude(similarity, elementType.errorFloatElementsAppender(queryVector), squaredMagnitude); + elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); } Query knnQuery = switch (elementType) { case BYTE -> { diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java index ac67c3c95719c..a19e02ea91cfb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java @@ -121,7 +121,7 @@ protected AbstractGeometryQueryBuilder(String fieldName, Geometry shape, String this.supplier = null; } - private void checkGeometry(Geometry geometry) { + private static void checkGeometry(Geometry geometry) { // linear ring geometries are not serializable, fail at construction time. if (geometry.type() == ShapeType.LINEARRING) { throw new IllegalArgumentException("[" + ShapeType.LINEARRING + "] geometries are not supported"); diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java b/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java index e3d4b02c52ee7..86304f1a5b362 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/wrappers/StableApiWrappers.java @@ -136,7 +136,7 @@ public org.elasticsearch.index.analysis.AnalysisMode getAnalysisMode() { return mapAnalysisMode(f.getAnalysisMode()); } - private org.elasticsearch.index.analysis.AnalysisMode mapAnalysisMode(AnalysisMode analysisMode) { + private static org.elasticsearch.index.analysis.AnalysisMode mapAnalysisMode(AnalysisMode analysisMode) { return org.elasticsearch.index.analysis.AnalysisMode.valueOf(analysisMode.name()); } }; diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 3db40ef018f98..cbc985ea4215c 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -941,7 +941,7 @@ private void executePipelines( } } - private void executePipeline( + private static void executePipeline( final IngestDocument ingestDocument, final Pipeline pipeline, final BiConsumer handler diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 59f914d27382d..1b970fc3283db 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -1298,7 +1298,7 @@ private static ReloadablePlugin wrapPlugins(List reloadablePlu }; } - private TelemetryProvider getTelemetryProvider(PluginsService pluginsService, Settings settings) { + private static TelemetryProvider getTelemetryProvider(PluginsService pluginsService, Settings settings) { final List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); if 
(telemetryPlugins.size() > 1) { @@ -1329,7 +1329,7 @@ private HealthService createHealthService( return new HealthService(concatLists(serverHealthIndicatorServices, pluginHealthIndicatorServices), threadPool); } - private HealthPeriodicLogger createHealthPeriodicLogger( + private static HealthPeriodicLogger createHealthPeriodicLogger( ClusterService clusterService, Settings settings, NodeClient client, diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java b/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java index 92a58a809c59d..8b7e4faa1f226 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginIntrospector.java @@ -111,7 +111,7 @@ Map deprecatedMethods(final Class pluginClass) { // finds the subset of given methods that are overridden by the given class // returns a map of method name to interface name the method was declared in - private Map findOverriddenMethods(final Class pluginClass, Map, List> methodsMap) { + private static Map findOverriddenMethods(final Class pluginClass, Map, List> methodsMap) { assert Plugin.class.isAssignableFrom(pluginClass); List> clazzes = Stream.concat(Stream.of(Plugin.class), interfaceClasses(pluginClass, methodsMap::containsKey)).toList(); if (clazzes.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java b/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java index 609a7b104f271..e007cac442f89 100644 --- a/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java +++ b/server/src/main/java/org/elasticsearch/plugins/scanners/NamedComponentReader.java @@ -72,7 +72,7 @@ Map findNamedComponents(Path pluginDir, ClassLoader pl return emptyMap(); } - private Path findNamedComponentCacheFile(Path pluginDir) throws IOException { + private static Path findNamedComponentCacheFile(Path pluginDir) throws IOException { try (Stream list = Files.list(pluginDir)) { return list.filter(p -> p.getFileName().toString().equals(NAMED_COMPONENTS_FILE_NAME)).findFirst().orElse(null); } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java index e34830dc3b355..5bead071cb4b6 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java @@ -48,7 +48,7 @@ public String name() { } @SuppressWarnings("unchecked") - private ClusterUpdateSettingsRequest prepare(Object input, Set previouslySet) { + private static ClusterUpdateSettingsRequest prepare(Object input, Set previouslySet) { // load the new settings into a builder so their paths are normalized @SuppressWarnings("unchecked") Settings.Builder newSettings = Settings.builder().loadFromMap((Map) input); diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 6677b17b9f810..e780b36568190 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -124,7 +124,7 @@ protected void processFileChanges() throws ExecutionException, 
InterruptedExcept completion.get(); } - private void completeProcessing(Exception e, PlainActionFuture completion) { + private static void completeProcessing(Exception e, PlainActionFuture completion) { if (e != null) { completion.onFailure(e); } else { diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java index 51102504e9dc2..3adf32454cc20 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedClusterStateService.java @@ -344,7 +344,7 @@ TrialRunResult trialRun( * * Package private for testing */ - void executeNonStateTransformationSteps( + static void executeNonStateTransformationSteps( List>> nonStateTransforms, ActionListener> listener ) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index 5f9be1acf4537..189bd9c2b9551 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -61,7 +61,7 @@ public String getName() { return "cluster_get_settings_action"; } - private void setUpRequestParams(MasterNodeReadRequest clusterRequest, RestRequest request) { + private static void setUpRequestParams(MasterNodeReadRequest clusterRequest, RestRequest request) { clusterRequest.local(request.paramAsBoolean("local", clusterRequest.local())); clusterRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterRequest.masterNodeTimeout())); } diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java index 9d031267d61f3..a986b62ce8496 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteBinaryDenseVector.java @@ -73,7 +73,7 @@ public double dotProduct(List queryVector) { } @SuppressForbidden(reason = "used only for bytes so it cannot overflow") - private int abs(int value) { + private static int abs(int value) { return Math.abs(value); } diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java index d982a94148516..b00b6703872ab 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/ByteKnnDenseVector.java @@ -70,7 +70,7 @@ public double dotProduct(List queryVector) { } @SuppressForbidden(reason = "used only for bytes so it cannot overflow") - private int abs(int value) { + private static int abs(int value) { return Math.abs(value); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java index ee81039569f92..9b4656ee7cf7e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java @@ -180,7 +180,7 @@ public AbstractInternalHDRPercentiles reduce(List aggregati * @param histogram2 The second histogram to merge * @return One of the input histograms such that the one with higher numberOfSignificantValueDigits is used as the one for merging */ - private DoubleHistogram merge(final DoubleHistogram histogram1, final DoubleHistogram histogram2) { + private static DoubleHistogram merge(final DoubleHistogram histogram1, final DoubleHistogram histogram2) { DoubleHistogram moreDigits = histogram1; DoubleHistogram lessDigits = histogram2; if (histogram2.getNumberOfSignificantValueDigits() > histogram1.getNumberOfSignificantValueDigits()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java index 0faa01c8c4410..08588473c61d1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java @@ -159,7 +159,7 @@ public AbstractInternalTDigestPercentiles reduce(List aggre * @param digest2 The second histogram to merge * @return One of the input histograms such that the one with larger compression is used as the one for merging */ - private TDigestState merge(final TDigestState digest1, final TDigestState digest2) { + private static TDigestState merge(final TDigestState digest1, final TDigestState digest2) { TDigestState largerCompression = digest1; TDigestState smallerCompression = digest2; if (digest2.compression() > digest1.compression()) { diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index eb199752f6fed..66ccae1746197 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -171,7 +171,7 @@ private static Timer maybeStartTimer(DfsProfiler profiler, DfsTimingType dtt) { return null; }; - private void executeKnnVectorQuery(SearchContext context) throws IOException { + private static void executeKnnVectorQuery(SearchContext context) throws IOException { SearchSourceBuilder source = context.request().source(); if (source == null || source.knnSearch().isEmpty()) { return; diff --git a/server/src/main/java/org/elasticsearch/search/lookup/StoredFieldSourceProvider.java b/server/src/main/java/org/elasticsearch/search/lookup/StoredFieldSourceProvider.java index 7db23b8823382..7516ab93f75a5 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/StoredFieldSourceProvider.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/StoredFieldSourceProvider.java @@ -38,7 +38,7 @@ public Source getSource(LeafReaderContext ctx, int doc) throws IOException { return leaves[ctx.ord].getSource(doc); } - private IndexReaderContext findParentContext(LeafReaderContext ctx) { + private static IndexReaderContext findParentContext(LeafReaderContext ctx) { if (ctx.parent != null) { return ctx.parent; } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java index 4ecb33a907f1e..13ca1d3dc1db2 100644 --- 
a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java @@ -100,7 +100,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws return super.doRewrite(queryRewriteContext); } - private int[] findSegmentStarts(IndexReader reader, int[] docs) { + private static int[] findSegmentStarts(IndexReader reader, int[] docs) { int[] starts = new int[reader.leaves().size() + 1]; starts[starts.length - 1] = docs.length; if (starts.length == 2) { diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index 35655f6260461..e1f6e561a047f 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -271,7 +271,7 @@ public void onFailure(Exception e) { if (countDown.countDown()) { if (attemptNumber >= MAX_CONNECT_ATTEMPTS_PER_RUN && connectionManager.size() == 0) { logger.warn(() -> "failed to open any proxy connections to cluster [" + clusterAlias + "]", e); - if (exceptions.values().stream().allMatch(ProxyConnectionStrategy.this::isRetryableException)) { + if (exceptions.values().stream().allMatch(RemoteConnectionStrategy::isRetryableException)) { finished.onFailure(getNoSeedNodeLeftException(exceptions.values())); } else { exceptions.values().stream().filter(e1 -> e1 != e).forEach(e::addSuppressed); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java index 802b703494792..32cd3fadad0a8 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java @@ -383,7 +383,7 @@ boolean assertNoRunningConnections() { protected abstract RemoteConnectionInfo.ModeInfo getModeInfo(); - protected boolean isRetryableException(Exception e) { + protected static boolean isRetryableException(Exception e) { // ISE if we fail the handshake with a version incompatible node return e instanceof ConnectTransportException || e instanceof IOException || e instanceof IllegalStateException; } diff --git a/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java b/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java index b7fc51ead8cf5..7db4c7b8da651 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java @@ -61,7 +61,7 @@ public void submit(ClusterService clusterService) { } @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here - private void submitUnbatchedTask( + private static void submitUnbatchedTask( ClusterService clusterService, @SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java index 72dca43ce796b..d6df8e26721c0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java @@ -19,8 +19,6 @@ public class LinearizabilityCheckerTests extends ESTestCase { - final LinearizabilityChecker checker = new LinearizabilityChecker(); - /** * Simple specification of a lock that can be exactly locked once. There is no unlocking. * Input is always null (and represents lock acquisition), output is a boolean whether lock was acquired. @@ -66,7 +64,7 @@ public void testLockWithLinearizableHistory1() { history.respond(call0, true); // 0: lock acquisition succeeded int call1 = history.invoke(null); // 1: acquire lock history.respond(call1, false); // 1: lock acquisition failed - assertTrue(checker.isLinearizable(lockSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(lockSpec, history)); } public void testLockWithLinearizableHistory2() { @@ -75,7 +73,7 @@ public void testLockWithLinearizableHistory2() { int call1 = history.invoke(null); // 1: acquire lock history.respond(call0, false); // 0: lock acquisition failed history.respond(call1, true); // 1: lock acquisition succeeded - assertTrue(checker.isLinearizable(lockSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(lockSpec, history)); } public void testLockWithLinearizableHistory3() { @@ -84,7 +82,7 @@ public void testLockWithLinearizableHistory3() { int call1 = history.invoke(null); // 1: acquire lock history.respond(call0, true); // 0: lock acquisition succeeded history.respond(call1, false); // 1: lock acquisition failed - assertTrue(checker.isLinearizable(lockSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(lockSpec, history)); } public void testLockWithNonLinearizableHistory() { @@ -93,7 +91,7 @@ public void testLockWithNonLinearizableHistory() { history.respond(call0, false); // 0: lock acquisition failed int call1 = history.invoke(null); // 1: acquire lock history.respond(call1, true); // 1: lock acquisition succeeded - assertFalse(checker.isLinearizable(lockSpec, history)); + assertFalse(LinearizabilityChecker.isLinearizable(lockSpec, history)); } /** @@ -137,11 +135,11 @@ public void testRegisterWithLinearizableHistory() { history.respond(call2, 0); // 2: read returns 0 history.respond(call1, 42); // 1: read returns 42 - expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(registerSpec, history)); - assertTrue(checker.isLinearizable(registerSpec, history, i -> null)); + expectThrows(IllegalArgumentException.class, () -> LinearizabilityChecker.isLinearizable(registerSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(registerSpec, history, i -> null)); history.respond(call0, null); // 0: write returns - assertTrue(checker.isLinearizable(registerSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(registerSpec, history)); } public void testRegisterWithNonLinearizableHistory() { @@ -152,11 +150,11 @@ public void testRegisterWithNonLinearizableHistory() { int call2 = history.invoke(null); // 2: invoke read history.respond(call2, 0); // 2: read returns 0, not allowed - expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(registerSpec, history)); - assertFalse(checker.isLinearizable(registerSpec, history, i -> null)); + expectThrows(IllegalArgumentException.class, () -> LinearizabilityChecker.isLinearizable(registerSpec, history)); + assertFalse(LinearizabilityChecker.isLinearizable(registerSpec, history, i -> null)); history.respond(call0, null); // 0: write returns - 
assertFalse(checker.isLinearizable(registerSpec, history)); + assertFalse(LinearizabilityChecker.isLinearizable(registerSpec, history)); } public void testRegisterObservedSequenceOfUpdatesWitLinearizableHistory() { @@ -173,7 +171,7 @@ public void testRegisterObservedSequenceOfUpdatesWitLinearizableHistory() { history.respond(call0, null); // 0: write returns history.respond(call1, null); // 1: write returns - assertTrue(checker.isLinearizable(registerSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(registerSpec, history)); } public void testRegisterObservedSequenceOfUpdatesWithNonLinearizableHistory() { @@ -190,7 +188,7 @@ public void testRegisterObservedSequenceOfUpdatesWithNonLinearizableHistory() { history.respond(call0, null); // 0: write returns history.respond(call1, null); // 1: write returns - assertFalse(checker.isLinearizable(registerSpec, history)); + assertFalse(LinearizabilityChecker.isLinearizable(registerSpec, history)); } final SequentialSpec multiRegisterSpec = new KeyedSpec() { @@ -229,12 +227,12 @@ public void testMultiRegisterWithLinearizableHistory() { history.respond(callY1, 42); // 1: read returns 42 on key y history.respond(callX1, 42); // 1: read returns 42 on key x - expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(multiRegisterSpec, history)); - assertTrue(checker.isLinearizable(multiRegisterSpec, history, i -> null)); + expectThrows(IllegalArgumentException.class, () -> LinearizabilityChecker.isLinearizable(multiRegisterSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history, i -> null)); history.respond(callX0, null); // 0: write returns on key x history.respond(callY0, null); // 0: write returns on key y - assertTrue(checker.isLinearizable(multiRegisterSpec, history)); + assertTrue(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history)); } public void testMultiRegisterWithNonLinearizableHistory() { @@ -250,11 +248,11 @@ public void testMultiRegisterWithNonLinearizableHistory() { history.respond(callY2, 0); // 2: read returns 0 on key y, not allowed history.respond(callX1, 42); // 1: read returns 42 on key x - expectThrows(IllegalArgumentException.class, () -> checker.isLinearizable(multiRegisterSpec, history)); - assertFalse(checker.isLinearizable(multiRegisterSpec, history, i -> null)); + expectThrows(IllegalArgumentException.class, () -> LinearizabilityChecker.isLinearizable(multiRegisterSpec, history)); + assertFalse(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history, i -> null)); history.respond(callX0, null); // 0: write returns on key x history.respond(callY0, null); // 0: write returns on key y - assertFalse(checker.isLinearizable(multiRegisterSpec, history)); + assertFalse(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 993146362acad..014834e98600a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -2334,14 +2334,14 @@ public void testV2TemplateOverlaps() throws Exception { ); // when validating is false, we return the conflicts instead of throwing an exception - var overlaps = metadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, 
false); + var overlaps = MetadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, false); assertThat(overlaps, allOf(aMapWithSize(1), hasKey("foo"))); // try now the same thing with validation on IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> metadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, true) + () -> MetadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, true) ); assertThat( e.getMessage(), @@ -2363,7 +2363,7 @@ public void testV2TemplateOverlaps() throws Exception { null ); - overlaps = metadataIndexTemplateService.v2TemplateOverlaps(state, "no-conflict", nonConflict, true); + overlaps = MetadataIndexTemplateService.v2TemplateOverlaps(state, "no-conflict", nonConflict, true); assertTrue(overlaps.isEmpty()); } @@ -2392,7 +2392,7 @@ public void testV2TemplateOverlaps() throws Exception { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> metadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, true) + () -> MetadataIndexTemplateService.v2TemplateOverlaps(state, "foo2", newTemplate, true) ); assertThat( e.getMessage(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java index bfdef69a5275a..b758d32d6149f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/SystemIndexMetadataUpgradeServiceTests.java @@ -131,17 +131,17 @@ public void testHasVisibleAliases() { .build(); // non-system indices should not require update - assertThat(service.hasVisibleAlias(nonSystemHiddenAlias), equalTo(false)); + assertThat(SystemIndexMetadataUpgradeService.hasVisibleAlias(nonSystemHiddenAlias), equalTo(false)); assertThat(service.requiresUpdate(nonSystemHiddenAlias), equalTo(false)); - assertThat(service.hasVisibleAlias(nonSystemVisibleAlias), equalTo(true)); + assertThat(SystemIndexMetadataUpgradeService.hasVisibleAlias(nonSystemVisibleAlias), equalTo(true)); assertThat(service.requiresUpdate(nonSystemVisibleAlias), equalTo(false)); // hidden system alias should not require update - assertThat(service.hasVisibleAlias(systemHiddenAlias), equalTo(false)); + assertThat(SystemIndexMetadataUpgradeService.hasVisibleAlias(systemHiddenAlias), equalTo(false)); assertThat(service.requiresUpdate(systemHiddenAlias), equalTo(false)); // visible system alias should require update - assertThat(service.hasVisibleAlias(systemVisibleAlias), equalTo(true)); + assertThat(SystemIndexMetadataUpgradeService.hasVisibleAlias(systemVisibleAlias), equalTo(true)); assertThat(service.requiresUpdate(systemVisibleAlias), equalTo(true)); } @@ -195,17 +195,17 @@ public void testIsVisible() { .build(); // non-system indices should not require update - assertThat(service.isVisible(nonSystemHiddenIndex), equalTo(false)); + assertThat(SystemIndexMetadataUpgradeService.isVisible(nonSystemHiddenIndex), equalTo(false)); assertThat(service.requiresUpdate(nonSystemHiddenIndex), equalTo(false)); - assertThat(service.isVisible(nonSystemVisibleIndex), equalTo(true)); + assertThat(SystemIndexMetadataUpgradeService.isVisible(nonSystemVisibleIndex), equalTo(true)); assertThat(service.requiresUpdate(nonSystemVisibleIndex), equalTo(false)); // hidden system index should not require update - 
assertThat(service.isVisible(systemHiddenIndex), equalTo(false)); + assertThat(SystemIndexMetadataUpgradeService.isVisible(systemHiddenIndex), equalTo(false)); assertThat(service.requiresUpdate(systemHiddenIndex), equalTo(false)); // visible system index should require update - assertThat(service.isVisible(systemVisibleIndex), equalTo(true)); + assertThat(SystemIndexMetadataUpgradeService.isVisible(systemVisibleIndex), equalTo(true)); assertThat(service.requiresUpdate(systemVisibleIndex), equalTo(true)); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index e05be91c651c1..cd04f81f0f355 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -34,7 +34,7 @@ public class DynamicMappingTests extends MapperServiceTestCase { - private XContentBuilder dynamicMapping(String dynamicValue, CheckedConsumer buildFields) + private static XContentBuilder dynamicMapping(String dynamicValue, CheckedConsumer buildFields) throws IOException { return topMapping(b -> { b.field("dynamic", dynamicValue); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java index f7bac4f303e5a..07c224fc561d1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -58,7 +58,8 @@ private DocumentMapper createDocumentMapper(String routingPath, XContentBuilder ).documentMapper(); } - private ParsedDocument parseDocument(DocumentMapper docMapper, CheckedConsumer f) throws IOException { + private static ParsedDocument parseDocument(DocumentMapper docMapper, CheckedConsumer f) + throws IOException { // Add the @timestamp field required by DataStreamTimestampFieldMapper for all time series indices return docMapper.parse(source(null, b -> { f.accept(b); @@ -66,7 +67,7 @@ private ParsedDocument parseDocument(DocumentMapper docMapper, CheckedConsumer f) throws IOException { + private static BytesRef parseAndGetTsid(DocumentMapper docMapper, CheckedConsumer f) throws IOException { return parseDocument(docMapper, f).rootDoc().getBinaryValue(TimeSeriesIdFieldMapper.NAME); } diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index 4896f118a1327..1540d3223ae72 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -146,11 +146,11 @@ public void testResetRecovery() throws Exception { } } - long startRecovery(RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard shard) { + static long startRecovery(RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard shard) { return startRecovery(collection, sourceNode, shard, listener, TimeValue.timeValueMinutes(60)); } - long startRecovery( + static long startRecovery( RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard indexShard, diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java 
b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java index f280e0556ae72..e8f5a71ad6fcb 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/ReservedClusterStateServiceTests.java @@ -641,7 +641,7 @@ public Map fromXContent(XContentParser parser) throws IOExceptio ); assertEquals(count, trialRunResult.nonStateTransforms().size()); - controller.executeNonStateTransformationSteps(trialRunResult.nonStateTransforms(), new ActionListener<>() { + ReservedClusterStateService.executeNonStateTransformationSteps(trialRunResult.nonStateTransforms(), new ActionListener<>() { @Override public void onResponse(Collection nonStateTransformResults) { assertEquals(count, nonStateTransformResults.size()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceTypeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceTypeTests.java index 2c8c6965152a0..07f41f2ae788b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceTypeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/CoreValuesSourceTypeTests.java @@ -143,7 +143,7 @@ private MapperService dateMapperService() throws IOException { return createMapperService(fieldMapping(b -> b.field("type", "date"))); } - private List docsWithDatesBetween(long min, long max) throws IOException { + private static List docsWithDatesBetween(long min, long max) throws IOException { return List.of(source(b -> b.field("field", min)), source(b -> b.field("field", max))); } } diff --git a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java index 27da5ff17af2d..964cb178b386f 100644 --- a/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java +++ b/test/fixtures/gcs-fixture/src/main/java/fixture/gcs/GoogleCloudStorageHttpHandler.java @@ -275,7 +275,7 @@ public Map blobs() { return blobs; } - private String httpServerUrl(final HttpExchange exchange) { + private static String httpServerUrl(final HttpExchange exchange) { return "http://" + exchange.getRequestHeaders().get("HOST").get(0); } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java index 8e9ddbd7d8189..05b931817fea4 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java @@ -65,7 +65,7 @@ protected HttpHandler createHandler(final String[] args) { }; } - protected String buildCredentialResponse(final String ec2AccessKey, final String ec2SessionToken) { + protected static String buildCredentialResponse(final String ec2AccessKey, final String ec2SessionToken) { return String.format(Locale.ROOT, """ { "AccessKeyId": "%s", diff --git a/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java b/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java index eb82f8b60c6fb..6d4fe25705688 100644 --- a/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java +++ 
b/test/framework/src/integTest/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java @@ -144,7 +144,7 @@ public void testTransportRespondsEventually() throws InterruptedException { assertEquals("All requests must respond, requests: " + requests, 0, latch.getCount()); } - private Tuple findDisruptedPair(NetworkDisruption.DisruptedLinks disruptedLinks) { + private static Tuple findDisruptedPair(NetworkDisruption.DisruptedLinks disruptedLinks) { Optional> disruptedPair = disruptedLinks.nodes() .stream() .flatMap(n1 -> disruptedLinks.nodes().stream().map(n2 -> Tuple.tuple(n1, n2))) @@ -161,7 +161,7 @@ private Tuple findDisruptedPair(NetworkDisru return disruptedPair.get(); } - private void sendRequest(TransportService source, TransportService target, CountDownLatch latch) { + private static void sendRequest(TransportService source, TransportService target, CountDownLatch latch) { source.sendRequest(target.getLocalNode(), ClusterHealthAction.NAME, new ClusterHealthRequest(), new TransportResponseHandler<>() { private AtomicBoolean responded = new AtomicBoolean(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index f93cc91602b34..57523d4a47e0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -278,7 +278,6 @@ public class Cluster implements Releasable { private final Set blackholedNodes = new HashSet<>(); private final Set> blackholedConnections = new HashSet<>(); private final Map committedStatesByVersion = new HashMap<>(); - private final LinearizabilityChecker linearizabilityChecker = new LinearizabilityChecker(); private final History history = new History(); private final CountingPageCacheRecycler countingPageCacheRecycler; private final Recycler recycler; @@ -757,7 +756,7 @@ private void stabilise(long stabilisationDurationMillis, boolean expectIdleJoinV if (history.size() > 300) { scheduler.schedule(() -> abort.set(true), 10, TimeUnit.SECONDS); } - final boolean linearizable = linearizabilityChecker.isLinearizable(spec, history, i -> null, abort::get); + final boolean linearizable = LinearizabilityChecker.isLinearizable(spec, history, i -> null, abort::get); if (abort.get() == false) { assertTrue("history not linearizable: " + history, linearizable); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java index 042a59179a751..ceca88ef7573e 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java @@ -214,7 +214,7 @@ public String toString() { * @param missingResponseGenerator used to complete the history with missing responses * @return true iff the history is linearizable w.r.t. 
the given spec */ - public boolean isLinearizable(SequentialSpec spec, History history, Function missingResponseGenerator) { + public static boolean isLinearizable(SequentialSpec spec, History history, Function missingResponseGenerator) { return isLinearizable(spec, history, missingResponseGenerator, () -> false); } @@ -227,7 +227,7 @@ public boolean isLinearizable(SequentialSpec spec, History history, Function missingResponseGenerator, @@ -239,7 +239,7 @@ public boolean isLinearizable( return partitions.stream().allMatch(h -> isLinearizable(spec, h, terminateEarly)); } - private boolean isLinearizable(SequentialSpec spec, List history, BooleanSupplier terminateEarly) { + private static boolean isLinearizable(SequentialSpec spec, List history, BooleanSupplier terminateEarly) { logger.debug("Checking history of size: {}: {}", history.size(), history); Object state = spec.initialState(); // the current state of the datatype final FixedBitSet linearized = new FixedBitSet(history.size() / 2); // the linearized prefix of the history @@ -290,7 +290,7 @@ private boolean isLinearizable(SequentialSpec spec, List history, Boolean /** * Convenience method for {@link #isLinearizable(SequentialSpec, History, Function)} that requires the history to be complete */ - public boolean isLinearizable(SequentialSpec spec, History history) { + public static boolean isLinearizable(SequentialSpec spec, History history) { return isLinearizable(spec, history, o -> { throw new IllegalArgumentException("history is not complete"); }); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 8c60045b13ede..825325c00a70b 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -455,11 +455,11 @@ public void describeTo(Description description) { .appendValue(generation); } - private String parseDataStreamName(String backingIndexName, int indexOfLastDash) { + private static String parseDataStreamName(String backingIndexName, int indexOfLastDash) { return backingIndexName.substring(4, backingIndexName.lastIndexOf('-', indexOfLastDash - 1)); } - private int parseGeneration(String backingIndexName, int indexOfLastDash) { + private static int parseGeneration(String backingIndexName, int indexOfLastDash) { return Integer.parseInt(backingIndexName.substring(indexOfLastDash + 1)); } }; diff --git a/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java index 99b0235f772cf..df8c0e9ad4b32 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/inject/ModuleTestCase.java @@ -25,7 +25,7 @@ public abstract class ModuleTestCase extends ESTestCase { * Configures the module, and ensures an instance is bound to the "to" class, and the * provided tester returns true on the instance. 
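* For example (an illustrative sketch only; {@code FooModule} and {@code Foo} are hypothetical names): * {@code assertInstanceBinding(new FooModule(), Foo.class, foo -> foo.isEnabled())}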
*/ - public void assertInstanceBinding(Module module, Class to, Predicate tester) { + public static void assertInstanceBinding(Module module, Class to, Predicate tester) { assertInstanceBindingWithAnnotation(module, to, tester, null); } @@ -33,7 +33,7 @@ public void assertInstanceBinding(Module module, Class to, Predicate t * Like {@link #assertInstanceBinding(Module, Class, Predicate)}, but filters the * classes checked by the given annotation. */ - private void assertInstanceBindingWithAnnotation( + private static void assertInstanceBindingWithAnnotation( Module module, Class to, Predicate tester, diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index ee878b6eff4e0..d4e9b1ad3b84c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -60,7 +60,7 @@ public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings ind ); } - private void applyOperation(Engine engine, Engine.Operation operation) throws IOException { + private static void applyOperation(Engine engine, Engine.Operation operation) throws IOException { switch (operation.operationType()) { case INDEX -> engine.index((Engine.Index) operation); case DELETE -> engine.delete((Engine.Delete) operation); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index bab885bd1053f..168ab8663a153 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -129,7 +129,7 @@ protected static IndexAnalyzers createIndexAnalyzers() { return IndexAnalyzers.of(Map.of("default", new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()))); } - protected final String randomIndexOptions() { + protected static String randomIndexOptions() { return randomFrom("docs", "freqs", "positions", "offsets"); } @@ -239,7 +239,7 @@ protected static IndexSettings createIndexSettings(IndexVersion version, Setting return new IndexSettings(meta, settings); } - protected final void withLuceneIndex( + protected static void withLuceneIndex( MapperService mapperService, CheckedConsumer builder, CheckedConsumer test @@ -258,22 +258,25 @@ protected final void withLuceneIndex( /** * Build a {@link SourceToParse} with the id {@code "1"} and without any dynamic templates. */ - protected final SourceToParse source(CheckedConsumer build) throws IOException { + protected static SourceToParse source(CheckedConsumer build) throws IOException { return source("1", build, null); } /** * Build a {@link SourceToParse} without any dynamic templates. */ - protected final SourceToParse source(@Nullable String id, CheckedConsumer build, @Nullable String routing) - throws IOException { + protected static SourceToParse source( + @Nullable String id, + CheckedConsumer build, + @Nullable String routing + ) throws IOException { return source(id, build, routing, Map.of()); } /** * Build a {@link SourceToParse}. 
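* For example (an illustrative sketch): {@code source("1", b -> b.field("field", "value"), null, Map.of())} builds a source with id {@code "1"}, no routing, and no dynamic templates.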
*/ - protected final SourceToParse source( + protected static SourceToParse source( @Nullable String id, CheckedConsumer build, @Nullable String routing, @@ -295,35 +298,35 @@ protected static SourceToParse source(String source) { /** * Merge a new mapping into the one in the provided {@link MapperService}. */ - protected final void merge(MapperService mapperService, XContentBuilder mapping) throws IOException { + protected static void merge(MapperService mapperService, XContentBuilder mapping) throws IOException { merge(mapperService, MapperService.MergeReason.MAPPING_UPDATE, mapping); } /** * Merge a new mapping into the one in the provided {@link MapperService}. */ - protected final void merge(MapperService mapperService, String mapping) throws IOException { + protected static void merge(MapperService mapperService, String mapping) throws IOException { mapperService.merge(null, new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); } - protected final void merge(MapperService mapperService, MapperService.MergeReason reason, String mapping) throws IOException { + protected static void merge(MapperService mapperService, MapperService.MergeReason reason, String mapping) throws IOException { mapperService.merge(null, new CompressedXContent(mapping), reason); } /** * Merge a new mapping into the one in the provided {@link MapperService} with a specific {@code MergeReason} */ - protected final void merge(MapperService mapperService, MapperService.MergeReason reason, XContentBuilder mapping) throws IOException { + protected static void merge(MapperService mapperService, MapperService.MergeReason reason, XContentBuilder mapping) throws IOException { mapperService.merge(null, new CompressedXContent(BytesReference.bytes(mapping)), reason); } - protected final XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { + protected static XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc"); buildFields.accept(builder); return builder.endObject().endObject(); } - protected final XContentBuilder mappingNoSubobjects(CheckedConsumer buildFields) throws IOException { + protected static XContentBuilder mappingNoSubobjects(CheckedConsumer buildFields) throws IOException { return topMapping(xContentBuilder -> { xContentBuilder.field("subobjects", false); xContentBuilder.startObject("properties"); @@ -332,19 +335,19 @@ protected final XContentBuilder mappingNoSubobjects(CheckedConsumer buildFields) throws IOException { + protected static XContentBuilder mapping(CheckedConsumer buildFields) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties"); buildFields.accept(builder); return builder.endObject().endObject().endObject(); } - protected final XContentBuilder dynamicMapping(Mapping dynamicMapping) throws IOException { + protected static XContentBuilder dynamicMapping(Mapping dynamicMapping) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); dynamicMapping.toXContent(builder, ToXContent.EMPTY_PARAMS); return builder.endObject(); } - protected final XContentBuilder fieldMapping(CheckedConsumer buildField) throws IOException { + protected static XContentBuilder fieldMapping(CheckedConsumer buildField) throws IOException { return mapping(b -> { b.startObject("field"); buildField.accept(b); @@ -352,7 +355,7 @@ protected final XContentBuilder 
fieldMapping(CheckedConsumer buildField) throws IOException { + protected static XContentBuilder runtimeFieldMapping(CheckedConsumer buildField) throws IOException { return runtimeMapping(b -> { b.startObject("field"); buildField.accept(b); @@ -360,7 +363,7 @@ protected final XContentBuilder runtimeFieldMapping(CheckedConsumer buildFields) throws IOException { + protected static XContentBuilder runtimeMapping(CheckedConsumer buildFields) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("runtime"); buildFields.accept(builder); return builder.endObject().endObject().endObject(); @@ -723,7 +726,7 @@ private void roundTripSyntheticSource(DocumentMapper mapper, String syntheticSou } } - private String syntheticSource(DocumentMapper mapper, IndexReader reader, int docId) throws IOException { + private static String syntheticSource(DocumentMapper mapper, IndexReader reader, int docId) throws IOException { SourceProvider provider = SourceProvider.fromSyntheticSource(mapper.mapping()); Source synthetic = provider.getSource(getOnlyLeafReader(reader).getContext(), docId); return synthetic.internalSourceRef().utf8ToString(); @@ -752,7 +755,7 @@ protected void validateRoundTripReader(String syntheticSource, DirectoryReader r ); } - protected final XContentBuilder syntheticSourceMapping(CheckedConsumer buildFields) throws IOException { + protected static XContentBuilder syntheticSourceMapping(CheckedConsumer buildFields) throws IOException { return topMapping(b -> { b.startObject("_source").field("mode", "synthetic").endObject(); b.startObject("properties"); @@ -761,7 +764,7 @@ protected final XContentBuilder syntheticSourceMapping(CheckedConsumer buildField) + protected static XContentBuilder syntheticSourceFieldMapping(CheckedConsumer buildField) throws IOException { return syntheticSourceMapping(b -> { b.startObject("field"); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index bc58a792cefc6..692f7c6810254 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -556,7 +556,7 @@ protected final List fetchFromDocValues(MapperService mapperService, MappedFi return result.get(); } - protected final void assertScriptDocValues(MapperService mapperService, Object sourceValue, Matcher> dvMatcher) + protected static void assertScriptDocValues(MapperService mapperService, Object sourceValue, Matcher> dvMatcher) throws IOException { withLuceneIndex(mapperService, iw -> { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field("field", sourceValue))).rootDoc()); diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index a05b2510bc056..ad515078fc1c5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -142,7 +142,7 @@ protected IndexMetadata buildIndexMetadata(int replicas, Settings indexSettings, return metadata.build(); } - IndexRequest copyIndexRequest(IndexRequest inRequest) throws IOException { + static IndexRequest 
copyIndexRequest(IndexRequest inRequest) throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { inRequest.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { @@ -151,7 +151,7 @@ IndexRequest copyIndexRequest(IndexRequest inRequest) throws IOException { } } - protected DiscoveryNode getDiscoveryNode(String id) { + protected static DiscoveryNode getDiscoveryNode(String id) { return DiscoveryNodeUtils.builder(id).name(id).roles(Collections.singleton(DiscoveryNodeRole.DATA_ROLE)).build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index 05828a572418c..d5d8057b72405 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -468,7 +468,7 @@ public void sendRequest( // We only use this method in IndexRecoveryWithSnapshotsIT that's located in the x-pack plugin // that implements snapshot based recoveries. - private void createSnapshotThatCanBeUsedDuringRecovery(String indexName) throws Exception { + private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) throws Exception { // Ensure that the safe commit == latest commit assertBusy(() -> { ShardStats stats = indicesAdmin().prepareStats(indexName) diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index 38a44bae48543..c0377f8ef1b9a 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -223,7 +223,7 @@ protected void assertCleanupResponse(CleanupRepositoryResponse response, long by assertThat(response.result().bytes(), equalTo(3L + 2 * 3L)); } - private void createDanglingIndex(final BlobStoreRepository repo, final Executor genericExec) throws Exception { + private static void createDanglingIndex(final BlobStoreRepository repo, final Executor genericExec) throws Exception { final PlainActionFuture future = PlainActionFuture.newFuture(); genericExec.execute(ActionRunnable.run(future, () -> { final BlobStore blobStore = repo.blobStore(); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index e45c962c9960c..7842f2fb9dfd7 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -141,7 +141,7 @@ public void tearDownHttpServer() { } } - protected void assertEmptyRepo(Map blobsMap) { + protected static void assertEmptyRepo(Map blobsMap) { List blobs = blobsMap.keySet().stream().filter(blob -> blob.contains("index") == false).collect(Collectors.toList()); assertThat("Only index blobs should remain in repository but found " + blobs, blobs, hasSize(0)); } diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java 
b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index bd313a7d9e0ee..130eca43e7a33 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -415,7 +415,7 @@ public Set> getSupportedContexts() { ); } - private Map createVars(Map params) { + private static Map createVars(Map params) { Map vars = new HashMap<>(); vars.put("params", params); return vars; diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 5a28bd8b0ea6d..c5a9a9ae7c6de 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -416,7 +416,7 @@ protected List objectMappers() { /** * Build a {@link SubSearchContext}s to power {@code top_hits}. */ - private SubSearchContext buildSubSearchContext( + private static SubSearchContext buildSubSearchContext( IndexSettings indexSettings, SearchExecutionContext searchExecutionContext, BitsetFilterCache bitsetFilterCache @@ -834,7 +834,7 @@ protected void debugTestCase( } } - private void collectDebugInfo(String prefix, Aggregator aggregator, Map> allDebug) { + private static void collectDebugInfo(String prefix, Aggregator aggregator, Map> allDebug) { Map debug = new HashMap<>(); aggregator.collectDebugInfo((key, value) -> { Object old = debug.put(key, value); @@ -869,7 +869,7 @@ protected void withAggregator( } } - private void verifyMetricNames( + private static void verifyMetricNames( ValuesSourceAggregationBuilder.MetricsAggregationBuilder aggregationBuilder, InternalAggregation agg ) { @@ -1111,7 +1111,7 @@ public void testSupportedFieldTypes() throws IOException { } } - private ValuesSourceType fieldToVST(MappedFieldType fieldType) { + private static ValuesSourceType fieldToVST(MappedFieldType fieldType) { return fieldType.fielddataBuilder(FieldDataContext.noRuntimeFields("test")).build(null, null).getValuesSourceType(); } @@ -1121,7 +1121,7 @@ private ValuesSourceType fieldToVST(MappedFieldType fieldType) { * Throws an exception if it encounters an unknown field type, to prevent new ones from sneaking in without * being tested. 
*/ - private void writeTestDoc(MappedFieldType fieldType, String fieldName, RandomIndexWriter iw) throws IOException { + private static void writeTestDoc(MappedFieldType fieldType, String fieldName, RandomIndexWriter iw) throws IOException { String typeName = fieldType.typeName(); ValuesSourceType vst = fieldToVST(fieldType); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index 4901b94b6dab3..d3012c4779024 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -264,7 +264,7 @@ private double[] randomLatLng() { return new double[] { lat, lon }; } - private boolean validPoint(double x, double y, GeoBoundingBox bbox) { + private static boolean validPoint(double x, double y, GeoBoundingBox bbox) { if (bbox == null) { return true; } @@ -279,7 +279,7 @@ private boolean validPoint(double x, double y, GeoBoundingBox bbox) { return false; } - private boolean intersectsBounds(Rectangle pointTile, GeoBoundingBox bbox) { + private static boolean intersectsBounds(Rectangle pointTile, GeoBoundingBox bbox) { if (bbox == null) { return true; } diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java index 3e650faa82b87..fecaeac1c85a2 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BaseShapeIntegTestCase.java @@ -459,7 +459,7 @@ public void testBulk() throws Exception { protected abstract void doDistanceAndBoundingBoxTest(String key); - private String findNodeName(String index) { + private static String findNodeName(String index) { ClusterState state = clusterAdmin().prepareState().get().getState(); IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0); String nodeId = shard.assignedShards().get(0).currentNodeId(); diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java index d451689350a43..029a9f753a365 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/GeoShapeIntegTestCase.java @@ -99,7 +99,7 @@ protected void doDistanceAndBoundingBoxTest(String key) { } } - private double distance(double lat1, double lon1, double lat2, double lon2) { + private static double distance(double lat1, double lon1, double lat2, double lon2) { return SloppyMath.haversinMeters(lat1, lon1, lat2, lon2); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 4956985c78a97..a557841ba34bd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -844,7 +844,7 @@ public static void setReplicaCount(int replicas, String index) { updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas), index); } - 
private Settings.Builder getExcludeSettings(int num, Settings.Builder builder) { + private static Settings.Builder getExcludeSettings(int num, Settings.Builder builder) { String exclude = String.join(",", internalCluster().allDataNodesButN(num)); builder.put("index.routing.allocation.exclude._name", exclude); return builder; @@ -1308,7 +1308,7 @@ protected void ensureClusterStateCanBeReadByNodeTool() throws IOException { } } - private void ensureClusterInfoServiceRunning() { + private static void ensureClusterInfoServiceRunning() { if (isInternalCluster() && cluster().size() > 0) { // ensures that the cluster info service didn't leak its async task, which would prevent future refreshes refreshClusterInfo(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index 530eafb84d47b..c913214a54718 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -392,7 +392,7 @@ private AggregationBuilder mockBuilder(String name, Map subNames return b; } - private void collectSubBuilderNames(Map names, InternalAggregation result) { + private static void collectSubBuilderNames(Map names, InternalAggregation result) { result.forEachBucket(ia -> { for (InternalAggregation a : ia.copyResults()) { @SuppressWarnings("unchecked") diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java index 3690750d94b70..18898e72c110c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java @@ -188,7 +188,7 @@ public boolean isDisruptedNodeThread(String threadName) { return threadName.contains("[" + disruptedNode + "]"); } - private String stackTrace(StackTraceElement[] stackTraceElements) { + private static String stackTrace(StackTraceElement[] stackTraceElements) { return Arrays.stream(stackTraceElements).map(Object::toString).collect(Collectors.joining("\n")); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index 0a94edb0527f6..5844dcbd66471 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -102,7 +102,7 @@ public void testFailure(Failure failure) throws Exception { printToErr(b.toString()); } - private boolean isRestApiCompatibilityTest() { + private static boolean isRestApiCompatibilityTest() { return Boolean.parseBoolean(System.getProperty("tests.restCompat", "false")); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 4e769881c39a0..a79d952ac5b27 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -878,7 +878,7 @@ private void checkForUnexpectedlyRecreatedObjects() throws IOException { ); } - private Set getAllUnexpectedIlmPolicies(Set 
exclusions) throws IOException { + private static Set getAllUnexpectedIlmPolicies(Set exclusions) throws IOException { Map policies; try { Response response = adminClient().performRequest(new Request("GET", "/_ilm/policy")); @@ -1063,7 +1063,7 @@ protected void deleteRepository(String repoName) throws IOException { /** * Remove any cluster settings. */ - private void wipeClusterSettings() throws IOException { + private static void wipeClusterSettings() throws IOException { Map getResponse = entityAsMap(adminClient().performRequest(new Request("GET", "/_cluster/settings"))); boolean mustClear = false; @@ -1169,7 +1169,7 @@ protected static RefreshResponse refresh(RestClient client, String index) throws return RefreshResponse.fromXContent(responseAsParser(response)); } - private void waitForPendingRollupTasks() throws Exception { + private static void waitForPendingRollupTasks() throws Exception { waitForPendingTasks(adminClient(), taskName -> taskName.startsWith("xpack/rollup/job") == false); } @@ -1277,7 +1277,7 @@ private void logIfThereAreRunningTasks() throws IOException { * Waits for the cluster state updates to have been processed, so that no cluster * state updates are still in-progress when the next test starts. */ - private void waitForClusterStateUpdatesToFinish() throws Exception { + private static void waitForClusterStateUpdatesToFinish() throws Exception { assertBusy(() -> { try { Response response = adminClient().performRequest(new Request("GET", "/_cluster/pending_tasks")); @@ -1438,7 +1438,7 @@ protected static void doConfigureClient(RestClientBuilder builder, Settings sett } @SuppressWarnings("unchecked") - private Set runningTasks(Response response) throws IOException { + private static Set runningTasks(Response response) throws IOException { Set runningTasks = new HashSet<>(); Map nodes = (Map) entityAsMap(response).get("nodes"); @@ -2042,7 +2042,7 @@ protected static IndexVersion minimumIndexVersion() throws IOException { } @SuppressWarnings("unchecked") - private void ensureGlobalCheckpointSynced(String index) throws Exception { + private static void ensureGlobalCheckpointSynced(String index) throws Exception { assertBusy(() -> { Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); List> shardStats = (List>) XContentMapValues.extractValue("indices." 
+ index + ".shards.0", stats); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java index 94c9dc338496a..dcd6f0e7f2e26 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ObjectPath.java @@ -82,7 +82,7 @@ public T evaluate(String path, Stash stash) throws IOException { } @SuppressWarnings("unchecked") - private Object evaluate(String key, Object objectToEvaluate, Stash stash) throws IOException { + private static Object evaluate(String key, Object objectToEvaluate, Stash stash) throws IOException { if (stash.containsStashedValue(key)) { key = stash.getValue(key).toString(); } @@ -119,7 +119,7 @@ private Object evaluate(String key, Object objectToEvaluate, Stash stash) throws ); } - private String[] parsePath(String path) { + private static String[] parsePath(String path) { List list = new ArrayList<>(); StringBuilder current = new StringBuilder(); boolean escape = false; diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java index bb84d1dfd321c..1892f76628a79 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryFactory.java @@ -116,7 +116,7 @@ public static void checkIndex(Logger logger, Store store, ShardId shardId) { } } - private Directory wrap(Directory dir, Random random, Settings indexSettings, ShardId shardId) { + private static Directory wrap(Directory dir, Random random, Settings indexSettings, ShardId shardId) { double randomIOExceptionRate = RANDOM_IO_EXCEPTION_RATE_SETTING.get(indexSettings); double randomIOExceptionRateOnOpen = RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.get(indexSettings); @@ -135,7 +135,7 @@ private Directory wrap(Directory dir, Random random, Settings indexSettings, Sha return w; } - private Directory randomDirectoryService(Random random, IndexSettings indexSettings, ShardPath path) throws IOException { + private static Directory randomDirectoryService(Random random, IndexSettings indexSettings, ShardPath path) throws IOException { final IndexMetadata build = IndexMetadata.builder(indexSettings.getIndexMetadata()) .settings( Settings.builder() diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 2f03303115e5c..dd766cd86cd7f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -49,7 +49,7 @@ public CapturedRequest[] getCapturedRequestsAndClear() { return requests.toArray(new CapturedRequest[0]); } - private Map> groupRequestsByTargetNode(Collection requests) { + private static Map> groupRequestsByTargetNode(Collection requests) { Map> result = new HashMap<>(); for (CapturedRequest request : requests) { result.computeIfAbsent(request.node.getId(), node -> new ArrayList<>()).add(request); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 672fd33ccb2af..3b42181216bcb 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -3367,7 +3367,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - private long[] getConstantMessageSizeHistogram(int count, long size) { + private static long[] getConstantMessageSizeHistogram(int count, long size) { final var histogram = new long[29]; int bucket = 0; long bucketLowerBound = 8; diff --git a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java index bbf578429a138..bd51c74ee8e47 100644 --- a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java +++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java @@ -467,7 +467,7 @@ private void checkArrayArgs( } // counts how many times argAndField was called on the method chain - private int getChainedParams(AbstractInsnNode startNode) { + private static int getChainedParams(AbstractInsnNode startNode) { int c = 0; AbstractInsnNode current = startNode; while (current.getNext() != null) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java index f00e6f13cb314..0c839aeddd189 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/WaitForHttpResource.java @@ -197,7 +197,7 @@ private KeyStore buildTrustStoreFromCA() throws GeneralSecurityException, IOExce return store; } - private SSLContext createSslContext(KeyStore trustStore) throws GeneralSecurityException { + private static SSLContext createSslContext(KeyStore trustStore) throws GeneralSecurityException { checkForTrustEntry(trustStore); TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); tmf.init(trustStore); @@ -206,7 +206,7 @@ private SSLContext createSslContext(KeyStore trustStore) throws GeneralSecurityE return sslContext; } - private void checkForTrustEntry(KeyStore trustStore) throws KeyStoreException { + private static void checkForTrustEntry(KeyStore trustStore) throws KeyStoreException { Enumeration enumeration = trustStore.aliases(); while (enumeration.hasMoreElements()) { if (trustStore.isCertificateEntry(enumeration.nextElement())) { diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ProcessReaper.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ProcessReaper.java index 7d3d19a204cbe..2c0eb9d2cea3d 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ProcessReaper.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/util/ProcessReaper.java @@ -146,7 +146,7 @@ private void ensureReaperAlive() { } } - private Path findJavaHome() { + private static Path findJavaHome() { Path javaBase = Path.of(System.getProperty("java.home")); return javaBase.endsWith("jre") && Files.exists(javaBase.getParent().resolve("bin/java")) ? 
javaBase.getParent() : javaBase; } diff --git a/test/x-content/src/main/java/org/elasticsearch/test/xcontent/AbstractSchemaValidationTestCase.java b/test/x-content/src/main/java/org/elasticsearch/test/xcontent/AbstractSchemaValidationTestCase.java index 8ac2783c170b9..44ebb89b6c2c0 100644 --- a/test/x-content/src/main/java/org/elasticsearch/test/xcontent/AbstractSchemaValidationTestCase.java +++ b/test/x-content/src/main/java/org/elasticsearch/test/xcontent/AbstractSchemaValidationTestCase.java @@ -129,7 +129,7 @@ private JsonSchemaFactory initializeSchemaFactory() { * Note: we might not catch all places, but at least it works for nested objects and * array items. */ - private void assertSchemaStrictness(Collection validatorSet, String path) { + private static void assertSchemaStrictness(Collection validatorSet, String path) { boolean additionalPropertiesValidatorFound = false; boolean subSchemaFound = false; diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java index a4879701fa966..b09250e1527f3 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java @@ -68,7 +68,7 @@ public ClientYamlTestResponse(Response response) throws IOException { * Also in testing there is no access to media types defined outside of XContentType. * Therefore a null has to be returned if a response content-type has a mediatype not defined in XContentType. */ - private XContentType getContentTypeIgnoreExceptions(String contentType) { + private static XContentType getContentTypeIgnoreExceptions(String contentType) { try { return XContentType.fromMediaType(contentType); } catch (IllegalArgumentException e) { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParser.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParser.java index 5ef863285fbbc..d135c56cdde01 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParser.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApiParser.java @@ -267,7 +267,7 @@ public ClientYamlSuiteRestApi parse(String location, XContentParser parser) thro return restApi; } - private List getStringsFromArray(XContentParser parser, String key) throws IOException { + private static List getStringsFromArray(XContentParser parser, String key) throws IOException { return parser.list().stream().filter(Objects::nonNull).map(o -> { if (o instanceof String) { return (String) o; diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 79ec4825fca21..0220c0931bca1 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -510,7 +510,7 @@ void checkWarningHeaders(final List warningHeaders, String testPath) { } } - private void appendBadHeaders(final StringBuilder sb, final List headers, final String message) { + private static void appendBadHeaders(final StringBuilder sb, 
final List headers, final String message) { if (headers.isEmpty() == false) { sb.append(message).append(" [\n"); for (final String header : headers) { diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java index 4ca1dc2f1d092..25065b6ced6e2 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/LicenseSigner.java @@ -96,7 +96,7 @@ public License sign(License licenseSpec) throws IOException { return License.builder().fromLicenseSpec(licenseSpec, Base64.getEncoder().encodeToString(bytes)).build(); } - private byte[] getPublicKeyFingerprint(byte[] keyBytes) { + private static byte[] getPublicKeyFingerprint(byte[] keyBytes) { MessageDigest sha256 = MessageDigests.sha256(); sha256.update(keyBytes); return sha256.digest(); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregator.java index db9e10c167209..ce152577864a4 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregator.java @@ -73,7 +73,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, AggregationRe } } - private AbstractHyperLogLogPlusPlus resolveBucketValue( + private static AbstractHyperLogLogPlusPlus resolveBucketValue( MultiBucketsAggregation agg, InternalMultiBucketAggregation.InternalBucket bucket, String aggPath diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java index 6e9c9812e9e95..90f22340edb0d 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregator.java @@ -164,7 +164,7 @@ private void reduceHDR( } } - private PercentileConfig resolvePercentileConfig( + private static PercentileConfig resolvePercentileConfig( MultiBucketsAggregation agg, InternalMultiBucketAggregation.InternalBucket bucket, String aggPath @@ -194,7 +194,7 @@ private PercentileConfig resolvePercentileConfig( throw buildResolveError(agg, aggPathsList, propertyValue, "percentiles"); } - private TDigestState resolveTDigestBucketValue( + private static TDigestState resolveTDigestBucketValue( MultiBucketsAggregation agg, InternalMultiBucketAggregation.InternalBucket bucket, String aggPath @@ -207,7 +207,7 @@ private TDigestState resolveTDigestBucketValue( return ((InternalTDigestPercentiles) propertyValue).getState(); } - private DoubleHistogram resolveHDRBucketValue( + private static DoubleHistogram resolveHDRBucketValue( MultiBucketsAggregation agg, InternalMultiBucketAggregation.InternalBucket bucket, String aggPath @@ -220,7 +220,7 @@ private DoubleHistogram resolveHDRBucketValue( return 
((InternalHDRPercentiles) propertyValue).getState(); } - private IllegalArgumentException buildResolveError( + private static IllegalArgumentException buildResolveError( MultiBucketsAggregation agg, List aggPathsList, Object propertyValue, @@ -253,7 +253,7 @@ private IllegalArgumentException buildResolveError( } } - private int clamp(int index, int length) { + private static int clamp(int index, int length) { if (index < 0) { return 0; } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java index 5d6b0ec760acd..7633d38d7ebba 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java @@ -144,7 +144,7 @@ List termValuesList(LeafReaderContext ctx) throws IOException { return termValuesList; } - List> docTerms(List termValuesList, int doc) throws IOException { + static List> docTerms(List termValuesList, int doc) throws IOException { List> terms = new ArrayList<>(); for (TermValues termValues : termValuesList) { List collectValues = termValues.collectValues(doc); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java index df53a40a0870c..0f74e3466dd0a 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java @@ -86,7 +86,7 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, docAValues) { - private void processValues( + private static void processValues( int doc, long bucket, SortedNumericDoubleValues docValues, @@ -116,7 +116,7 @@ public void collect(int doc, long bucket) throws IOException { }; } - private Bits getBits(LeafReaderContext ctx, Weight weight) throws IOException { + private static Bits getBits(LeafReaderContext ctx, Weight weight) throws IOException { if (weight == null) { return null; } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java index 1c20ab8ee62a3..672ddad9ea189 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java @@ -75,7 +75,7 @@ private void validate(final String deciderName, final Settings configuration, So configuration.keySet().forEach(key -> validateSetting(key, configuration, deciderSettings, deciderName)); } - private void validateSetting(String key, Settings configuration, Map> deciderSettings, String decider) { + private static void validateSetting(String key, Settings configuration, Map> deciderSettings, String decider) { Setting setting = deciderSettings.get(key); if (setting == null) { throw new IllegalArgumentException("unknown setting [" + key + "] for decider [" + 
decider + "]"); @@ -212,7 +212,7 @@ DefaultAutoscalingDeciderContext createContext( * Check if the policy has unknown roles. This can only happen in mixed clusters, where one master can accept a policy but if it fails * over to an older master before it is also upgraded, one of the roles might not be known. */ - private boolean hasUnknownRoles(AutoscalingPolicy policy) { + private static boolean hasUnknownRoles(AutoscalingPolicy policy) { return DiscoveryNodeRole.roleNames().containsAll(policy.roles()) == false; } @@ -338,7 +338,7 @@ private AutoscalingCapacity.AutoscalingResources resourcesFor(DiscoveryNode node ); } - private long totalStorage(Map diskUsages, DiscoveryNode node) { + private static long totalStorage(Map diskUsages, DiscoveryNode node) { DiskUsage diskUsage = diskUsages.get(node.getId()); return diskUsage != null ? diskUsage.getTotalBytes() : -1; } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java index 578689828de6b..2380082fbf66e 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java @@ -191,7 +191,7 @@ private void sendToMissingNodes(Function nodeLookup, Set< ); } - private Set> calculateAutoscalingRoleSets(ClusterState state) { + private static Set> calculateAutoscalingRoleSets(ClusterState state) { AutoscalingMetadata autoscalingMetadata = state.metadata().custom(AutoscalingMetadata.NAME); if (autoscalingMetadata != null) { return autoscalingMetadata.policies() @@ -199,13 +199,13 @@ private Set> calculateAutoscalingRoleSets(ClusterState st .stream() .map(AutoscalingPolicyMetadata::policy) .map(AutoscalingPolicy::roles) - .map(this::toRoles) + .map(AutoscalingNodeInfoService::toRoles) .collect(Collectors.toSet()); } return Set.of(); } - private Set toRoles(SortedSet roleNames) { + private static Set toRoles(SortedSet roleNames) { return roleNames.stream().map(DiscoveryNodeRole::getRoleFromRoleName).collect(Collectors.toSet()); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java index 80c626e8b24d7..c1221f371076f 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java @@ -48,7 +48,7 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider List indicesNeedingFrozen = context.state() .metadata() .stream() - .filter(this::needsTier) + .filter(FrozenExistenceDeciderService::isFrozenPhase) .map(imd -> imd.getIndex().getName()) .limit(10) .collect(Collectors.toList()); @@ -63,10 +63,6 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider return new AutoscalingDeciderResult(builder.build(), new FrozenExistenceReason(indicesNeedingFrozen)); } - boolean needsTier(IndexMetadata idxMeta) { - return isFrozenPhase(idxMeta); - } - @Override public List> deciderSettings() { return 
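
// The `.map(this::toRoles)` to `.map(AutoscalingNodeInfoService::toRoles)` rewrite
// above is forced, not stylistic: once a method is static, Java rejects a method
// reference through `this`. A minimal sketch of the rule, using a hypothetical
// class rather than the Elasticsearch one:

    import java.util.List;
    import java.util.stream.Stream;

    class Example {
        static String shout(String s) {
            return s.toUpperCase();
        }

        List<String> shoutAll(Stream<String> names) {
            // names.map(this::shout) does not compile: `shout` is static and
            // cannot be referenced through an instance receiver.
            return names.map(Example::shout).toList();
        }
    }
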
Collections.emptyList(); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index c94ffd5b20798..fc8eedbe1ca75 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -544,7 +544,14 @@ boolean needsThisTier(ShardRouting shard, RoutingAllocation allocation) { IndexMetadata indexMetadata = indexMetadata(shard, allocation); Set decisionTypes = allocation.routingNodes() .stream() - .map(node -> DataTierAllocationDecider.shouldFilter(indexMetadata, node.node(), this::highestPreferenceTier, allocation)) + .map( + node -> DataTierAllocationDecider.shouldFilter( + indexMetadata, + node.node(), + AllocationState::highestPreferenceTier, + allocation + ) + ) .map(Decision::type) .collect(Collectors.toSet()); if (decisionTypes.contains(Decision.Type.NO)) { @@ -579,11 +586,11 @@ private static boolean isAssignedToTier(IndexMetadata indexMetadata, Set highestPreferenceTier( + private static Optional highestPreferenceTier( List preferredTiers, DiscoveryNodes unused, DesiredNodes desiredNodes, diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java index 96b1848f40597..5eb146102cd76 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/SparseFileTracker.java @@ -319,7 +319,7 @@ public boolean waitForRangeIfPending(final ByteRange range, final ActionListener return true; } - private void subscribeToCompletionListeners(List requiredRanges, long rangeEnd, ActionListener listener) { + private static void subscribeToCompletionListeners(List requiredRanges, long rangeEnd, ActionListener listener) { // NB we work with ranges outside the mutex here, but only to interact with their completion listeners which are `final` so // there is no risk of concurrent modification. 
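
// Every hunk in this patch has the same shape: a private helper that never reads
// instance state gains the static modifier, which records that fact in the
// signature and lets callers (including other static code) use it without an
// instance. A minimal before/after sketch with a hypothetical class:

    import java.util.List;

    class Totals {
        // Before: compiles, but the implicit `this` is never used.
        private long sum(List<Long> values) {
            long total = 0;
            for (long v : values) {
                total += v;
            }
            return total;
        }

        // After: identical body; the signature now guarantees no instance state
        // is involved, and Totals::sumStatic becomes usable in static contexts.
        private static long sumStatic(List<Long> values) {
            long total = 0;
            for (long v : values) {
                total += v;
            }
            return total;
        }
    }
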
switch (requiredRanges.size()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index 39979d89ed8b8..891e8343ffe0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -266,7 +266,7 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String clusterService.submitUnbatchedStateUpdateTask(source, task); } - private boolean licenseIsCompatible(License license, Version version) { + private static boolean licenseIsCompatible(License license, Version version) { final int maxVersion = LicenseUtils.getMaxLicenseVersion(version); return license.version() <= maxVersion; } @@ -276,7 +276,7 @@ private boolean isAllowedLicenseType(License.LicenseType type) { return allowedLicenseTypes.contains(type); } - private TimeValue days(int days) { + private static TimeValue days(int days) { return TimeValue.timeValueHours(days * 24); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index d51059ec7810f..adc0d66353608 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -97,7 +97,7 @@ public void onFailure(@Nullable Exception e) { listener.onFailure(e); } - private boolean shouldGenerateNewBasicLicense(License currentLicense) { + private static boolean shouldGenerateNewBasicLicense(License currentLicense) { return currentLicense == null || License.LicenseType.isBasic(currentLicense.type()) == false || LicenseSettings.SELF_GENERATED_LICENSE_MAX_NODES != currentLicense.maxNodes() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java index cb22b11a5be98..50485ecc21d9a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java @@ -166,7 +166,7 @@ private LiveDocs getLiveDocs(LeafReader reader) throws IOException { return new LiveDocs(reader.numDeletedDocs(), reader.getLiveDocs()); } - private int apply(DocIdSetIterator iterator, FixedBitSet bits) throws IOException { + private static int apply(DocIdSetIterator iterator, FixedBitSet bits) throws IOException { int docID = -1; int newDeletes = 0; while ((docID = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { @@ -321,7 +321,7 @@ private SegmentCommitInfo syncSegment( } } - private boolean assertLiveDocs(Bits liveDocs, int deletes) { + private static boolean assertLiveDocs(Bits liveDocs, int deletes) { int actualDeletes = 0; for (int i = 0; i < liveDocs.length(); i++) { if (liveDocs.get(i) == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java index 13b659c95c1f5..a0aaf7f3bfeb5 100644 --- 
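
// The days(int) helper above is a plain unit conversion: days(7) produces
// TimeValue.timeValueHours(7 * 24), that is 168 hours. The same arithmetic with
// java.time.Duration standing in for TimeValue (an illustration only, not the
// Elasticsearch API):

    import java.time.Duration;

    class Days {
        static Duration days(int days) {
            return Duration.ofHours(days * 24L);  // days(7) -> PT168H
        }
    }
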
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/mapper/DataTierFieldMapper.java @@ -86,7 +86,7 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) * Retrieve the first tier preference from the index setting. If the setting is not * present, then return null. */ - private String getTierPreference(QueryRewriteContext context) { + private static String getTierPreference(QueryRewriteContext context) { Settings settings = context.getIndexSettings().getSettings(); String value = DataTier.TIER_PREFERENCE_SETTING.get(settings); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java index ec969538b0733..1e8bd2e8107b8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java @@ -82,7 +82,7 @@ public ActionRequestValidationException validate() { return error; } - private ActionRequestValidationException checkConfigIdIsValid( + private static ActionRequestValidationException checkConfigIdIsValid( DataFrameAnalyticsConfig analyticsConfig, ActionRequestValidationException error ) { @@ -106,7 +106,7 @@ private ActionRequestValidationException checkConfigIdIsValid( return error; } - private ActionRequestValidationException checkNoIncludedAnalyzedFieldsAreExcludedBySourceFiltering( + private static ActionRequestValidationException checkNoIncludedAnalyzedFieldsAreExcludedBySourceFiltering( DataFrameAnalyticsConfig analyticsConfig, ActionRequestValidationException error ) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java index ebe8f74829997..67cf1865efb78 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java @@ -84,7 +84,7 @@ public ActionRequestValidationException validate() { return error; } - private ActionRequestValidationException checkConfigIdIsValid( + private static ActionRequestValidationException checkConfigIdIsValid( DataFrameAnalyticsConfig analyticsConfig, ActionRequestValidationException error ) { @@ -108,7 +108,7 @@ private ActionRequestValidationException checkConfigIdIsValid( return error; } - private ActionRequestValidationException checkNoIncludedAnalyzedFieldsAreExcludedBySourceFiltering( + private static ActionRequestValidationException checkNoIncludedAnalyzedFieldsAreExcludedBySourceFiltering( DataFrameAnalyticsConfig analyticsConfig, ActionRequestValidationException error ) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index cacc4a6a33196..db1b66982f105 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -725,7 +725,7 @@ public TimeValue defaultFrequency(TimeValue bucketSpan, NamedXContentRegistry xC return defaultFrequency; } - private TimeValue defaultFrequencyTarget(TimeValue bucketSpan) { + private static TimeValue defaultFrequencyTarget(TimeValue bucketSpan) { long bucketSpanSeconds = bucketSpan.seconds(); if (bucketSpanSeconds <= 0) { throw new IllegalArgumentException("Bucket span has to be > 0"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index ff3e765508daf..6b34277ac49ba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -255,7 +255,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private void addOptionalField(XContentBuilder builder, ParseField field, Object value) throws IOException { + private static void addOptionalField(XContentBuilder builder, ParseField field, Object value) throws IOException { if (value != null) { builder.field(field.getPreferredName(), value); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java index c45b10e9decf6..a43cf3da4bbae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java @@ -162,7 +162,7 @@ protected static double calculateAucScore(List rocCurve) { private record RateThresholdCurve(double[] percentiles, boolean isTp) { - private double getRate(int index) { + private static double getRate(int index) { return 1 - 0.01 * (index + 1); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/EnsembleInferenceModel.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/EnsembleInferenceModel.java index 10f0cc6770a9c..c0e8610a357b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/EnsembleInferenceModel.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/inference/EnsembleInferenceModel.java @@ -181,7 +181,7 @@ private InferenceResults innerInfer(double[] features, InferenceConfig config, M return featureInfluence; } - private void addFeatureImportance(double[][] featureInfluence, RawInferenceResults inferenceResult) { + private static void addFeatureImportance(double[][] featureInfluence, RawInferenceResults inferenceResult) { double[][] modelFeatureImportance = inferenceResult.getFeatureImportance(); assert modelFeatureImportance.length == featureInfluence.length; for (int j = 0; j < modelFeatureImportance.length; j++) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeNode.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeNode.java index ba5ef0ae663e4..b0e82568d8b3d 100644 --- 
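
// addOptionalField is the usual null guard around serialization: emit a field
// only when a value is present, so unset optional fields disappear from the
// rendered document instead of being written as null. The same pattern with a
// plain Map standing in for the XContentBuilder (hypothetical sketch):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class UpdateSketch {
        private final Map<String, Object> out = new LinkedHashMap<>();

        void addOptionalField(String name, Object value) {
            if (value != null) {  // skip unset fields entirely
                out.put(name, value);
            }
        }
    }
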
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeNode.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/TreeNode.java @@ -203,7 +203,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private void addOptionalDouble(XContentBuilder builder, ParseField field, double value) throws IOException { + private static void addOptionalDouble(XContentBuilder builder, ParseField field, double value) throws IOException { if (Numbers.isValidDouble(value)) { builder.field(field.getPreferredName(), value); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index 1aff73cadd54a..52fd74e572200 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -719,7 +719,7 @@ private void validateRule(DetectionRule rule, DetectorFunction detectorFunction) checkScoping(rule); } - private void checkFunctionHasRuleSupport(DetectionRule rule, DetectorFunction detectorFunction) { + private static void checkFunctionHasRuleSupport(DetectionRule rule, DetectorFunction detectorFunction) { if (ruleHasConditionOnResultValue(rule) && FUNCTIONS_WITHOUT_RULE_CONDITION_SUPPORT.contains(detectorFunction)) { String msg = Messages.getMessage(Messages.JOB_CONFIG_DETECTION_RULE_NOT_SUPPORTED_BY_FUNCTION, detectorFunction); throw ExceptionsHelper.badRequestException(msg); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java index 54f77655e2822..ca1fd98b7bfb3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -360,7 +360,7 @@ private Map> inputFieldMap() { return result; } - private void addInputFieldsToMap(Map> inputFields, String inputFieldName, String fieldValue) { + private static void addInputFieldsToMap(Map> inputFields, String inputFieldName, String fieldValue) { if (Strings.isNullOrEmpty(inputFieldName) == false && fieldValue != null) { if (ReservedFieldNames.isValidFieldName(inputFieldName)) { inputFields.computeIfAbsent(inputFieldName, k -> new LinkedHashSet<>()).add(fieldValue); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java index 0483e8d302940..b9d39aa665848 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java @@ -1347,7 +1347,7 @@ private TreeSet getSet(int type) { }; } - private ValueSet getValue(int v, String s, int i) { + private static ValueSet getValue(int v, String s, int i) { char c = s.charAt(i); StringBuilder s1 = new StringBuilder(String.valueOf(v)); while (c >= '0' && c <= '9') { @@ -1365,13 +1365,13 @@ private ValueSet getValue(int v, String s, int i) { return val; } - private int getNumericValue(String s, int i) { + private static int getNumericValue(String s, int i) { int endOfVal = findNextWhiteSpace(i, 
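
// addInputFieldsToMap above relies on the computeIfAbsent multimap idiom: the
// value set is created on first sight of a key and then added to, in a single
// expression with one map lookup. The idiom in isolation (hypothetical class):

    import java.util.LinkedHashMap;
    import java.util.LinkedHashSet;
    import java.util.Map;
    import java.util.Set;

    class InputFields {
        private final Map<String, Set<String>> inputFields = new LinkedHashMap<>();

        void add(String field, String value) {
            // Materializes the LinkedHashSet only for keys seen the first time.
            inputFields.computeIfAbsent(field, k -> new LinkedHashSet<>()).add(value);
        }
    }
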
s); String val = s.substring(i, endOfVal); return Integer.parseInt(val); } - private int getMonthNumber(String s) { + private static int getMonthNumber(String s) { Integer integer = MONTH_MAP.get(s); if (integer == null) { @@ -1381,7 +1381,7 @@ private int getMonthNumber(String s) { return integer; } - private int getDayOfWeekNumber(String s) { + private static int getDayOfWeekNumber(String s) { Integer integer = DAY_MAP.get(s); if (integer == null) { @@ -1409,7 +1409,7 @@ private static boolean isLeapYear(int year) { return ((year % 4 == 0 && year % 100 != 0) || (year % 400 == 0)); } - private int getLastDayOfMonth(int monthNum, int year) { + private static int getLastDayOfMonth(int monthNum, int year) { return switch (monthNum) { case 1 -> 31; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java index bb5b364005395..43908b6cf13fb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java @@ -249,7 +249,7 @@ public int hashCode() { return Objects.hash(realmName, userName, ids, name, ownedByAuthenticatedUser); } - private void validateIds(@Nullable String[] idsToValidate) { + private static void validateIds(@Nullable String[] idsToValidate) { if (idsToValidate != null) { if (idsToValidate.length == 0) { final ActionRequestValidationException validationException = new ActionRequestValidationException(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java index 98d40deaf7f1e..c011bd8af6c51 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequestBuilder.java @@ -35,7 +35,8 @@ public PutPrivilegesRequestBuilder(ElasticsearchClient client) { super(client, PutPrivilegesAction.INSTANCE, new PutPrivilegesRequest()); } - ApplicationPrivilegeDescriptor parsePrivilege(XContentParser parser, String applicationName, String privilegeName) throws IOException { + static ApplicationPrivilegeDescriptor parsePrivilege(XContentParser parser, String applicationName, String privilegeName) + throws IOException { ApplicationPrivilegeDescriptor privilege = ApplicationPrivilegeDescriptor.parse(parser, applicationName, privilegeName, false); checkPrivilegeName(privilege, applicationName, privilegeName); return privilege; @@ -99,7 +100,7 @@ public PutPrivilegesRequestBuilder source(BytesReference source, XContentType xC return this; } - private void checkPrivilegeName(ApplicationPrivilegeDescriptor privilege, String applicationName, String providedName) { + private static void checkPrivilegeName(ApplicationPrivilegeDescriptor privilege, String applicationName, String providedName) { final String privilegeName = privilege.getName(); if (Strings.isNullOrEmpty(applicationName) == false && applicationName.equals(privilege.getApplication()) == false) { throw new IllegalArgumentException( diff --git 
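
// isLeapYear above encodes the Gregorian rule: divisible by 4, except century
// years, which must also be divisible by 400. Spot checks, using the expression
// exactly as it appears in Cron:

    class LeapYear {
        static boolean isLeapYear(int year) {
            return ((year % 4 == 0 && year % 100 != 0) || (year % 400 == 0));
        }

        // isLeapYear(1900) == false  (century year, 1900 % 400 != 0)
        // isLeapYear(2000) == true   (2000 % 400 == 0)
        // isLeapYear(2024) == true   (divisible by 4, not a century year)
    }
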
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java index 50d15672ae80d..5ec28dc68181e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java @@ -124,12 +124,12 @@ private RoleMapperExpression parseExpression(XContentParser parser, String field } } - private RoleMapperExpression parseFieldExpression(XContentParser parser) throws IOException { + private static RoleMapperExpression parseFieldExpression(XContentParser parser) throws IOException { checkStartObject(parser); final String fieldName = readFieldName(Fields.FIELD.getPreferredName(), parser); final List values; if (parser.nextToken() == XContentParser.Token.START_ARRAY) { - values = parseArray(Fields.FIELD, parser, this::parseFieldValue); + values = parseArray(Fields.FIELD, parser, ExpressionParser::parseFieldValue); } else { values = Collections.singletonList(parseFieldValue(parser)); } @@ -147,14 +147,14 @@ private RoleMapperExpression parseExceptExpression(XContentParser parser) throws return new ExceptExpression(parseRulesObject(Fields.EXCEPT.getPreferredName(), parser, false)); } - private void checkStartObject(XContentParser parser) throws IOException { + private static void checkStartObject(XContentParser parser) throws IOException { final XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("failed to parse rules expression. expected an object but found [{}] instead", token); } } - private String readFieldName(String objectName, XContentParser parser) throws IOException { + private static String readFieldName(String objectName, XContentParser parser) throws IOException { if (parser.nextToken() != XContentParser.Token.FIELD_NAME) { throw new ElasticsearchParseException("failed to parse rules expression. 
object [{}] does not contain any fields", objectName); } @@ -167,8 +167,11 @@ private List parseExpressionArray(ParseField field, XConte return parseArray(field, parser, p -> parseRulesObject(field.getPreferredName(), p, allowExcept)); } - private List parseArray(ParseField field, XContentParser parser, CheckedFunction elementParser) - throws IOException { + private static List parseArray( + ParseField field, + XContentParser parser, + CheckedFunction elementParser + ) throws IOException { final XContentParser.Token token = parser.currentToken(); if (token == XContentParser.Token.START_ARRAY) { List list = new ArrayList<>(); @@ -181,7 +184,7 @@ private List parseArray(ParseField field, XContentParser parser, CheckedF } } - private FieldExpression.FieldValue parseFieldValue(XContentParser parser) throws IOException { + private static FieldExpression.FieldValue parseFieldValue(XContentParser parser) throws IOException { return switch (parser.currentToken()) { case VALUE_STRING -> new FieldExpression.FieldValue(parser.text()); case VALUE_BOOLEAN -> new FieldExpression.FieldValue(parser.booleanValue()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java index 69edb7151e324..ec99301b92357 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java @@ -272,7 +272,7 @@ public BitSet getBitSet(final Query query, final LeafReaderContext context) thro } @Nullable - private BitSet computeBitSet(Query query, LeafReaderContext context) throws IOException { + private static BitSet computeBitSet(Query query, LeafReaderContext context) throws IOException { final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index 9e26ff5e2bf38..13558175afaa3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -613,7 +613,7 @@ private boolean isActionGranted(final String action, final Map usageStats() { return Collections.emptyMap(); } - public RoleDescriptor roleDescriptor(String role) { + public static RoleDescriptor roleDescriptor(String role) { return RESERVED_ROLES.get(role); } - public Collection roleDescriptors() { + public static Collection roleDescriptors() { return RESERVED_ROLES.values(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index ae47fff47f82e..0ca489b32eb6d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -673,7 +673,7 @@ 
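
// parseArray takes its element parser as a CheckedFunction so that one generic
// loop can drive any element type while still letting IOException propagate;
// java.util.function.Function could not declare the checked exception. A
// self-contained sketch of the pattern (Elasticsearch ships its own
// CheckedFunction; the interface below is redeclared only for illustration):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    interface CheckedFunction<T, R, E extends Exception> {
        R apply(T t) throws E;
    }

    class ArrayParsing {
        // Generic driver: delegates each element to the supplied parser, which
        // is free to throw IOException back through this method.
        static <T> List<T> parseAll(List<String> raw, CheckedFunction<String, T, IOException> elementParser) throws IOException {
            List<T> out = new ArrayList<>(raw.size());
            for (String token : raw) {
                out.add(elementParser.apply(token));
            }
            return out;
        }
    }
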
private void addIngestPipelinesIfMissing(ClusterState state) { } } - private boolean pipelineDependenciesExist(ClusterState state, List dependencies) { + private static boolean pipelineDependenciesExist(ClusterState state, List dependencies) { for (String dependency : dependencies) { if (findInstalledPipeline(state, dependency) == null) { return false; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java index a630358775d4e..78406929c2cf7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TransportTermsEnumAction.java @@ -150,7 +150,7 @@ protected void doExecute(Task task, TermsEnumRequest request, ActionListener shardIds, @@ -165,7 +165,7 @@ protected NodeTermsEnumRequest newNodeRequest( return new NodeTermsEnumRequest(originalIndices, nodeId, shardIds, request, taskStartMillis); } - private NodeTermsEnumResponse readShardResponse(StreamInput in) throws IOException { + private static NodeTermsEnumResponse readShardResponse(StreamInput in) throws IOException { return new NodeTermsEnumResponse(in); } @@ -207,7 +207,7 @@ protected Map> getNodeBundles(ClusterState clusterState, St return fastNodeBundles; } - private TermsEnumResponse mergeResponses( + private static TermsEnumResponse mergeResponses( TermsEnumRequest request, AtomicReferenceArray atomicResponses, boolean complete, @@ -274,7 +274,7 @@ private TermsEnumResponse mergeResponses( return new TermsEnumResponse(ans, (failedShards + successfulShards), successfulShards, failedShards, shardFailures, complete); } - private List mergeResponses(List> termsList, int size) { + private static List mergeResponses(List> termsList, int size) { final PriorityQueue pq = new PriorityQueue<>(termsList.size()) { @Override protected boolean lessThan(TermIterator a, TermIterator b) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java index beecea3b9f054..af13fcbcb7139 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java @@ -240,7 +240,7 @@ public boolean changesHeaders(TransformConfig config) { return isNullOrEqual(headers, config.getHeaders()) == false; } - private boolean isNullOrEqual(Object lft, Object rgt) { + private static boolean isNullOrEqual(Object lft, Object rgt) { return lft == null || lft.equals(rgt); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStats.java index 1ef90ea55170e..7cf84ac3ecea8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStats.java @@ -261,7 +261,7 @@ public void incrementCheckpointExponentialAverages(long checkpointDurationMs, lo } } - private double calculateExpAvg(double 
previousExpValue, double alpha, long observedValue) { + private static double calculateExpAvg(double previousExpValue, double alpha, long observedValue) { return alpha * observedValue + (1 - alpha) * previousExpValue; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java index 3d21a7f2f14bb..a0ae881919d81 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java @@ -183,7 +183,7 @@ public String toString() { } } - private Interval readInterval(StreamInput in) throws IOException { + private static Interval readInterval(StreamInput in) throws IOException { byte id = in.readByte(); return switch (id) { case FIXED_INTERVAL_ID -> new FixedInterval(in); @@ -192,7 +192,7 @@ private Interval readInterval(StreamInput in) throws IOException { }; } - private void writeInterval(Interval anInterval, StreamOutput out) throws IOException { + private static void writeInterval(Interval anInterval, StreamOutput out) throws IOException { out.write(anInterval.getIntervalTypeId()); anInterval.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java index de7cd4f78e2be..daad2fcdbfbdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/ActionWrapper.java @@ -237,7 +237,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - private Map toMap(WatchExecutionContext ctx) { + private static Map toMap(WatchExecutionContext ctx) { Map model = new HashMap<>(); model.put("id", ctx.id().value()); model.put("watch_id", ctx.id().watchId()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java index c65710d38fd38..49e00588746fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoService.java @@ -156,7 +156,7 @@ public char[] decrypt(char[] chars) { * @param chars the chars to check if they are encrypted * @return true is data is encrypted */ - protected boolean isEncrypted(char[] chars) { + protected static boolean isEncrypted(char[] chars) { return CharArrays.charsBeginsWith(ENCRYPTED_TEXT_PREFIX, chars); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index b30a7f5360f0a..41e89a4403d17 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -297,7 +297,7 @@ public void testSnapshotUserRole() { final TransportRequest 
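
// calculateExpAvg above is a textbook exponential moving average: with smoothing
// factor alpha, the new value is alpha * observed + (1 - alpha) * previous, so a
// larger alpha weights the latest checkpoint more heavily. A worked example with
// the formula as written:

    class ExpAvg {
        static double calculateExpAvg(double previousExpValue, double alpha, long observedValue) {
            return alpha * observedValue + (1 - alpha) * previousExpValue;
        }

        // calculateExpAvg(100.0, 0.1, 200)
        //   = 0.1 * 200 + 0.9 * 100.0
        //   = 20.0 + 90.0
        //   = 110.0
    }
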
request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("snapshot_user"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("snapshot_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -372,7 +372,7 @@ public void testIngestAdminRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("ingest_admin"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("ingest_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -414,7 +414,7 @@ public void testKibanaSystemRole() { () -> AuthenticationTestHelper.builder().build() ); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_system"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("kibana_system"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -1339,7 +1339,7 @@ public void testKibanaAdminRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_admin"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("kibana_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); assertThat(roleDescriptor.getMetadata(), not(hasEntry("_deprecated", true))); @@ -1400,7 +1400,7 @@ public void testKibanaUserRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("kibana_user"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("kibana_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); assertThat(roleDescriptor.getMetadata(), hasEntry("_deprecated", true)); @@ -1470,7 +1470,7 @@ public void testMonitoringUserRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("monitoring_user"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("monitoring_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -1598,7 +1598,7 @@ public void testRemoteMonitoringAgentRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("remote_monitoring_agent"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("remote_monitoring_agent"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -1777,7 +1777,7 @@ public void testRemoteMonitoringCollectorRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication 
= AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("remote_monitoring_collector"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("remote_monitoring_collector"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2046,7 +2046,7 @@ public void testReportingUserRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("reporting_user"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("reporting_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); assertThat(roleDescriptor.getMetadata(), hasEntry("_deprecated", true)); @@ -2102,7 +2102,7 @@ public void testSuperuserRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("superuser"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("superuser"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2229,7 +2229,7 @@ public void testLogstashSystemRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("logstash_system"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("logstash_system"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2267,7 +2267,7 @@ public void testBeatsAdminRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - final RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("beats_admin"); + final RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("beats_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2318,7 +2318,7 @@ public void testBeatsSystemRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor(UsernamesField.BEATS_ROLE); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor(UsernamesField.BEATS_ROLE); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2360,7 +2360,7 @@ public void testAPMSystemRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor(UsernamesField.APM_ROLE); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor(UsernamesField.APM_ROLE); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2413,7 +2413,7 @@ public void testAPMUserRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = 
AuthenticationTestHelper.builder().build(); - final RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("apm_user"); + final RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("apm_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2496,7 +2496,7 @@ public void testMachineLearningAdminRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("machine_learning_admin"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("machine_learning_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2636,7 +2636,7 @@ public void testMachineLearningUserRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("machine_learning_user"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("machine_learning_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2761,8 +2761,8 @@ public void testTransformAdminRole() { final Authentication authentication = AuthenticationTestHelper.builder().build(); RoleDescriptor[] roleDescriptors = { - new ReservedRolesStore().roleDescriptor("data_frame_transforms_admin"), - new ReservedRolesStore().roleDescriptor("transform_admin") }; + ReservedRolesStore.roleDescriptor("data_frame_transforms_admin"), + ReservedRolesStore.roleDescriptor("transform_admin") }; for (RoleDescriptor roleDescriptor : roleDescriptors) { assertNotNull(roleDescriptor); @@ -2850,8 +2850,8 @@ public void testTransformUserRole() { final Authentication authentication = AuthenticationTestHelper.builder().build(); RoleDescriptor[] roleDescriptors = { - new ReservedRolesStore().roleDescriptor("data_frame_transforms_user"), - new ReservedRolesStore().roleDescriptor("transform_user") }; + ReservedRolesStore.roleDescriptor("data_frame_transforms_user"), + ReservedRolesStore.roleDescriptor("transform_user") }; for (RoleDescriptor roleDescriptor : roleDescriptors) { assertNotNull(roleDescriptor); @@ -2943,7 +2943,7 @@ public void testWatcherAdminRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("watcher_admin"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("watcher_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -2974,7 +2974,7 @@ public void testWatcherUserRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("watcher_user"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("watcher_user"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -3009,7 +3009,7 @@ public void testPredefinedViewerRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = 
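
// Every test call site in this file changes the same way: with roleDescriptor
// static there is no reason to allocate a throwaway ReservedRolesStore, and the
// class-qualified call makes the lookup's statelessness obvious. The shape of
// the rewrite, with a hypothetical registry in place of ReservedRolesStore:

    import java.util.Map;

    class Registry {
        private static final Map<String, String> ROLES = Map.of("viewer", "read-only");

        static String descriptor(String role) {
            return ROLES.get(role);
        }
    }

    // Before: new Registry().descriptor("viewer")  -- allocates an instance and
    //         hides the fact that the answer never depends on it.
    // After:  Registry.descriptor("viewer")        -- no allocation, clear intent.
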
AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("viewer"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("viewer"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -3080,7 +3080,7 @@ public void testPredefinedEditorRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("editor"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("editor"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -3150,7 +3150,7 @@ public void testPredefinedEditorRole() { } public void testRemoteIndicesPrivilegesForSuperuserRole() { - final RoleDescriptor superuserRoleDescriptor = new ReservedRolesStore().roleDescriptor("superuser"); + final RoleDescriptor superuserRoleDescriptor = ReservedRolesStore.roleDescriptor("superuser"); final Role superuserRole = Role.buildFromRoleDescriptor( superuserRoleDescriptor, new FieldPermissionsCache(Settings.EMPTY), @@ -3172,7 +3172,7 @@ public void testRemoteIndicesPrivilegesForSuperuserRole() { public void testRemoteIndicesPrivileges() { final List rolesWithRemoteIndicesPrivileges = new ArrayList<>(); - for (RoleDescriptor roleDescriptor : new ReservedRolesStore().roleDescriptors()) { + for (RoleDescriptor roleDescriptor : ReservedRolesStore.roleDescriptors()) { if (roleDescriptor.getName().equals("superuser")) { continue; // superuser is tested separately } @@ -3296,7 +3296,7 @@ public void testLogstashAdminRole() { final TransportRequest request = mock(TransportRequest.class); final Authentication authentication = AuthenticationTestHelper.builder().build(); - RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("logstash_admin"); + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("logstash_admin"); assertNotNull(roleDescriptor); assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); @@ -3397,7 +3397,7 @@ public void testIncludeReservedRoles() { assertThat(roleRetrievalResult.isSuccess(), is(true)); assertThat(roleRetrievalResult.getDescriptors().stream().map(RoleDescriptor::getName).toList(), contains(roleName)); - assertThat(reservedRolesStore.roleDescriptor(roleName), notNullValue()); + assertThat(ReservedRolesStore.roleDescriptor(roleName), notNullValue()); } else { assertThat(ReservedRolesStore.isReserved(roleName), is(false)); assertThat(ReservedRolesStore.names(), not(hasItem(roleName))); @@ -3407,12 +3407,12 @@ public void testIncludeReservedRoles() { assertThat(roleRetrievalResult.isSuccess(), is(true)); assertThat(roleRetrievalResult.getDescriptors(), emptyIterable()); - assertThat(reservedRolesStore.roleDescriptor(roleName), nullValue()); + assertThat(ReservedRolesStore.roleDescriptor(roleName), nullValue()); } } assertThat( - reservedRolesStore.roleDescriptors().stream().map(RoleDescriptor::getName).collect(Collectors.toUnmodifiableSet()), + ReservedRolesStore.roleDescriptors().stream().map(RoleDescriptor::getName).collect(Collectors.toUnmodifiableSet()), equalTo(includedRoles) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java index 
d23c21f88c9c3..f48a02189ab8e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/watcher/crypto/CryptoServiceTests.java @@ -45,12 +45,12 @@ public void testEncryptionAndDecryptionChars() throws Exception { public void testEncryptedChar() throws Exception { CryptoService service = new CryptoService(settings); - assertThat(service.isEncrypted((char[]) null), is(false)); - assertThat(service.isEncrypted(new char[0]), is(false)); - assertThat(service.isEncrypted(new char[CryptoService.ENCRYPTED_TEXT_PREFIX.length()]), is(false)); - assertThat(service.isEncrypted(CryptoService.ENCRYPTED_TEXT_PREFIX.toCharArray()), is(true)); - assertThat(service.isEncrypted(randomAlphaOfLengthBetween(0, 100).toCharArray()), is(false)); - assertThat(service.isEncrypted(service.encrypt(randomAlphaOfLength(10).toCharArray())), is(true)); + assertThat(CryptoService.isEncrypted((char[]) null), is(false)); + assertThat(CryptoService.isEncrypted(new char[0]), is(false)); + assertThat(CryptoService.isEncrypted(new char[CryptoService.ENCRYPTED_TEXT_PREFIX.length()]), is(false)); + assertThat(CryptoService.isEncrypted(CryptoService.ENCRYPTED_TEXT_PREFIX.toCharArray()), is(true)); + assertThat(CryptoService.isEncrypted(randomAlphaOfLengthBetween(0, 100).toCharArray()), is(false)); + assertThat(CryptoService.isEncrypted(service.encrypt(randomAlphaOfLength(10).toCharArray())), is(true)); } public void testErrorMessageWhenSecureEncryptionKeySettingDoesNotExist() throws Exception { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/RestDeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/RestDeprecationInfoAction.java index 8aeac3d3881c6..92a2242d114f9 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/RestDeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/RestDeprecationInfoAction.java @@ -45,7 +45,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client } } - private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { + private static RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { Request infoRequest = new Request(Strings.splitStringByCommaToArray(request.param("index"))); return channel -> client.execute(DeprecationInfoAction.INSTANCE, infoRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransformDeprecationChecker.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransformDeprecationChecker.java index df2c6c1d247d4..45384afbec59e 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransformDeprecationChecker.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransformDeprecationChecker.java @@ -43,7 +43,7 @@ public String getName() { return TRANSFORM_DEPRECATION_KEY; } - private void recursiveGetTransformsAndCollectDeprecations( + private static void recursiveGetTransformsAndCollectDeprecations( Components components, List issues, PageParams page, diff --git a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java 
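
// The isEncrypted assertions above pin down its contract: null and empty inputs
// are never "encrypted", and a char[] qualifies exactly when it begins with
// ENCRYPTED_TEXT_PREFIX. A null-safe prefix check with the same behavior (the
// prefix value below is made up; the real code delegates to
// CharArrays.charsBeginsWith):

    class PrefixCheck {
        static final String PREFIX = "::es_encrypted::";  // hypothetical value

        static boolean isEncrypted(char[] chars) {
            if (chars == null || chars.length < PREFIX.length()) {
                return false;  // covers the null and too-short cases in the test
            }
            for (int i = 0; i < PREFIX.length(); i++) {
                if (chars[i] != PREFIX.charAt(i)) {
                    return false;  // e.g. a zero-filled array of prefix length
                }
            }
            return true;
        }
    }
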
b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java index 86624cdd5cec9..2878b36b0248c 100644 --- a/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java +++ b/x-pack/plugin/enrich/qa/common/src/main/java/org/elasticsearch/test/enrich/CommonEnrichRestTestCase.java @@ -77,7 +77,7 @@ public void deletePipelinesAndPolicies() throws Exception { } @SuppressWarnings("unchecked") - private Property unsafeGetProperty(Map map, String key) { + private static Property unsafeGetProperty(Map map, String key) { return (Property) map.get(key); } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java index abc811afa8be4..749bce59a4bbb 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichCache.java @@ -95,7 +95,7 @@ private String getEnrichIndexKey(SearchRequest searchRequest) { return ia.getIndices().get(0).getName(); } - List> toCacheValue(SearchResponse response) { + static List> toCacheValue(SearchResponse response) { List> result = new ArrayList<>(response.getHits().getHits().length); for (SearchHit hit : response.getHits()) { result.add(deepCopy(hit.getSourceAsMap(), true)); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java index 7d1c52cf8a1f6..502f71d0f225a 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java @@ -165,7 +165,11 @@ void cleanUpEnrichIndices() { deleteIndices(removeIndices); } - private boolean indexUsedByPolicy(IndexMetadata indexMetadata, Map policies, Set inflightPolicyIndices) { + private static boolean indexUsedByPolicy( + IndexMetadata indexMetadata, + Map policies, + Set inflightPolicyIndices + ) { String indexName = indexMetadata.getIndex().getName(); logger.debug("Checking if should remove enrich index [{}]", indexName); // First ignore the index entirely if it is in the inflightPolicyIndices set as it is actively being worked on diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index 3f77dc9d722ab..48b9e94cb71e9 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -143,7 +143,7 @@ public void run() { } } - private List> toMappings(GetIndexResponse response) { + private static List> toMappings(GetIndexResponse response) { return response.mappings().values().stream().map(MappingMetadata::getSourceAsMap).collect(Collectors.toList()); } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java index 9675d7e3e36b6..907ebb0c9ce3a 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java +++ 
b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactory.java @@ -136,7 +136,7 @@ public void accept(ClusterState state) { handler.accept(response, null); } else { originClient.execute(EnrichCoordinatorProxyAction.INSTANCE, req, ActionListener.wrap(resp -> { - List<Map<String, Object>> value = enrichCache.toCacheValue(resp); + List<Map<String, Object>> value = EnrichCache.toCacheValue(resp); enrichCache.put(req, value); handler.accept(EnrichCache.deepCopy(value, false), null); }, e -> { handler.accept(null, e); })); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java index 08cd87a78d874..df8ea5344708d 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java @@ -155,7 +155,7 @@ private boolean assertSearchSource() { return true; } - private SearchSourceBuilder copy(SearchSourceBuilder source) { + private static SearchSourceBuilder copy(SearchSourceBuilder source) { NamedWriteableRegistry registry = new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); try (BytesStreamOutput output = new BytesStreamOutput()) { source.writeTo(output); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java index 4d5ea0b5a3d01..be0d38acf323e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java @@ -204,12 +204,15 @@ public void onFailure(Exception e) { ); } - private void addSearchApplicationsUsage(ListSearchApplicationAction.Response response, Map<String, Object> searchApplicationsUsage) { + private static void addSearchApplicationsUsage( + ListSearchApplicationAction.Response response, + Map<String, Object> searchApplicationsUsage + ) { long count = response.queryPage().count(); searchApplicationsUsage.put(EnterpriseSearchFeatureSetUsage.COUNT, count); } - private void addAnalyticsCollectionsUsage( + private static void addAnalyticsCollectionsUsage( GetAnalyticsCollectionAction.Response response, Map<String, Object> analyticsCollectionsUsage ) { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsCollectionResolver.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsCollectionResolver.java index 13e2b9528c1a0..72ffc1b978304 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsCollectionResolver.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsCollectionResolver.java @@ -104,7 +104,7 @@ public List<AnalyticsCollection> collections(ClusterState state, String...
expre return new ArrayList<>(collections.values()); } - private boolean matchExpression(String collectionName, String expression) { + private static boolean matchExpression(String collectionName, String expression) { if (Strings.isNullOrEmpty(expression)) { return false; } @@ -120,7 +120,7 @@ private boolean matchExpression(String collectionName, String expression) { return collectionName.equals(expression); } - private boolean matchAnyExpression(String collectionName, String... expressions) { + private static boolean matchAnyExpression(String collectionName, String... expressions) { if (expressions.length < 1) { return true; } @@ -128,7 +128,7 @@ private boolean matchAnyExpression(String collectionName, String... expressions) return Arrays.stream(expressions).anyMatch(expression -> matchExpression(collectionName, expression)); } - private boolean matchAnyExpression(AnalyticsCollection collection, String... expressions) { + private static boolean matchAnyExpression(AnalyticsCollection collection, String... expressions) { return matchAnyExpression(collection.getName(), expressions); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java index 0e3bb7150c45e..34292c4669333 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/RestPostAnalyticsEventAction.java @@ -56,7 +56,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeC ); } - private InetAddress getClientAddress(RestRequest restRequest, Map> headers) { + private static InetAddress getClientAddress(RestRequest restRequest, Map> headers) { InetAddress remoteAddress = restRequest.getHttpChannel().getRemoteAddress().getAddress(); if (headers.containsKey(X_FORWARDED_FOR_HEADER)) { final List addresses = headers.get(X_FORWARDED_FOR_HEADER); @@ -71,7 +71,7 @@ private InetAddress getClientAddress(RestRequest restRequest, Map sourceTuple = restRequest.contentOrSourceParam(); PostAnalyticsEventAction.RequestBuilder builder = PostAnalyticsEventAction.Request.builder( diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java index 01807fd986947..a2ad6a59e54fe 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java @@ -211,7 +211,7 @@ public void onFailure(Exception e) { } @SuppressWarnings("unchecked") - private List parseCriteria(List> rawCriteria) { + private static List parseCriteria(List> rawCriteria) { List criteria = new ArrayList<>(rawCriteria.size()); for (Map entry : rawCriteria) { criteria.add( diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java index 51b55a0a1c033..46e4d45f2c146 100644 --- 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java @@ -206,12 +206,7 @@ public void getSearchApplication(String resourceName, ActionListener index.getName()) - .toArray(String[]::new); + return clusterService.state().metadata().aliasedIndices(searchApplicationName).stream().map(Index::getName).toArray(String[]::new); } private static String getSearchAliasName(SearchApplication app) { diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java index 11dc831e8e846..e11d1cab8eaa7 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/BaseEqlSpecTestCase.java @@ -70,7 +70,7 @@ public void setup() throws Exception { } } - private boolean doWithRequest(Request request, RestClient client, Function consumer) { + private static boolean doWithRequest(Request request, RestClient client, Function consumer) { try { return consumer.apply(client.performRequest(request)); } catch (IOException e) { @@ -228,7 +228,7 @@ public String toString() { } - private String eventsToString(List> events) { + private static String eventsToString(List> events) { StringJoiner sj = new StringJoiner(",", "[", "]"); for (Map event : events) { sj.add(event.get("_id") + "|" + event.get("_index")); @@ -301,7 +301,7 @@ public String toString() { ); } - private String keysToString(List keys) { + private static String keysToString(List keys) { StringJoiner sj = new StringJoiner(",", "[", "]"); for (Object key : keys) { sj.add(key.toString()); diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlRestTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlRestTestCase.java index 074fce78e6b93..65ed174f55625 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlRestTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/EqlRestTestCase.java @@ -185,7 +185,7 @@ public void testUnicodeChars() throws Exception { deleteIndexWithProvisioningClient("test"); } - private void bulkIndex(String bulk) throws IOException { + private static void bulkIndex(String bulk) throws IOException { Request bulkRequest = new Request("POST", "/_bulk"); bulkRequest.setJsonEntity(bulk); bulkRequest.addParameter("refresh", "true"); diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/stats/EqlUsageRestTestCase.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/stats/EqlUsageRestTestCase.java index 7c1804f2a5a32..2626fcfda1ef1 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/stats/EqlUsageRestTestCase.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/stats/EqlUsageRestTestCase.java @@ -103,7 +103,7 @@ public void getBaseMetrics() throws UnsupportedOperationException, IOException { * } */ @SuppressWarnings({ "unchecked", "rawtypes" }) - private Map getFeaturesMetrics(Map perNodeStats) { + private static Map getFeaturesMetrics(Map perNodeStats) { Map featuresMetrics = (Map) ((Map) perNodeStats.get("stats")).get("features"); featuresMetrics.putAll((Map) 
featuresMetrics.get("keys")); featuresMetrics.putAll((Map) featuresMetrics.get("sequences")); @@ -306,7 +306,7 @@ private void assertAllFailedQueryMetrics(int allFailedQueries, Map getStats() throws UnsupportedOperationException, IOException { + private static Map getStats() throws UnsupportedOperationException, IOException { Request request = new Request("GET", "/_eql/stats"); Map responseAsMap; try (InputStream content = client().performRequest(request).getEntity().getContent()) { @@ -349,7 +349,7 @@ private void assertFeatureMetric(int expected, Map responseAsMap } @SuppressWarnings({ "unchecked", "rawtypes" }) - private void assertQueryMetric(int expected, Map responseAsMap, String queryType, String metric) { + private static void assertQueryMetric(int expected, Map responseAsMap, String queryType, String metric) { List>> nodesListStats = (List) responseAsMap.get("stats"); int actualMetricValue = 0; for (Map perNodeStats : nodesListStats) { @@ -360,7 +360,7 @@ private void assertQueryMetric(int expected, Map responseAsMap, assertEquals(expected, actualMetricValue); } - private void assertAllQueryMetric(int expected, Map responseAsMap, String metric) { + private static void assertAllQueryMetric(int expected, Map responseAsMap, String metric) { assertQueryMetric(expected, responseAsMap, "_all", metric); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/analysis/Verifier.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/analysis/Verifier.java index c39d850d69ed9..cbede5871f275 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/analysis/Verifier.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/analysis/Verifier.java @@ -234,7 +234,7 @@ Collection verify(LogicalPlan plan) { return failures; } - private void checkJoinKeyTypes(LogicalPlan plan, Set localFailures) { + private static void checkJoinKeyTypes(LogicalPlan plan, Set localFailures) { if (plan instanceof Join join) { List queries = join.queries(); KeyedFilter until = join.until(); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java index 508dac362e028..dea45e4b9d766 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/ExecutionManager.java @@ -238,7 +238,7 @@ public Executable assemble(List> listOfKeys, List ); } - private boolean[] toMissing(List criteria) { + private static boolean[] toMissing(List criteria) { boolean[] result = new boolean[criteria.size()]; for (int i = 0; i < criteria.size(); i++) { result[i] = criteria.get(i).missing(); @@ -246,7 +246,7 @@ private boolean[] toMissing(List criteria) { return result; } - private HitExtractor timestampExtractor(HitExtractor hitExtractor) { + private static HitExtractor timestampExtractor(HitExtractor hitExtractor) { if (hitExtractor instanceof FieldHitExtractor fe) { return (fe instanceof TimestampFieldHitExtractor) ? 
hitExtractor : new TimestampFieldHitExtractor(fe); } @@ -265,7 +265,7 @@ private List hitExtractors(List exps, FieldE return extractors; } - private List compositeKeyExtractors(List exps, FieldExtractorRegistry registry) { + private static List compositeKeyExtractors(List exps, FieldExtractorRegistry registry) { List extractors = new ArrayList<>(exps.size()); for (Expression exp : exps) { extractors.add(RuntimeUtils.createBucketExtractor(registry.compositeKeyExtraction(exp))); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java index 4a7f521c20050..873eda29dcdbf 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java @@ -187,7 +187,7 @@ public void withCompositeAggregation() { searchSource.aggregation(agg); } - private boolean isOptionalAttribute(Attribute a) { + private static boolean isOptionalAttribute(Attribute a) { return a instanceof OptionalMissingAttribute || a instanceof OptionalResolvedAttribute; } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java index 04c64202fd3e4..bbee76e976f92 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java @@ -627,7 +627,7 @@ private void secondaryCriterion(WindowInfo window, int currentStage, ActionListe /** * Trim hits outside the (upper) limit. 
*/ - private List trim(List searchHits, SequenceCriterion criterion, Ordinal boundary) { + private static List trim(List searchHits, SequenceCriterion criterion, Ordinal boundary) { int offset = 0; for (int i = searchHits.size() - 1; i >= 0; i--) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/optimizer/Optimizer.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/optimizer/Optimizer.java index 3396222d9c2c6..5bccf013bc789 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/optimizer/Optimizer.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/optimizer/Optimizer.java @@ -403,7 +403,7 @@ protected LogicalPlan rule(AbstractJoin join) { return join; } - private List detectKeyConstraints(Expression condition, KeyedFilter filter) { + private static List detectKeyConstraints(Expression condition, KeyedFilter filter) { List constraints = new ArrayList<>(); List keys = filter.keys(); @@ -430,7 +430,7 @@ private List detectKeyConstraints(Expression condition, KeyedFilter } // adapt constraint to the given filter by replacing the keys accordingly in the expressions - private KeyedFilter addConstraint(KeyedFilter k, List constraints) { + private static KeyedFilter addConstraint(KeyedFilter k, List constraints) { Expression constraint = Predicates.combineAnd( constraints.stream().map(c -> c.constraintFor(k)).filter(Objects::nonNull).collect(toList()) ); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java index cedd4ec8d035c..4877b4d909a72 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plan/physical/EsQueryExec.java @@ -75,7 +75,7 @@ public void execute(EqlSession session, ActionListener listener) { new BasicQueryClient(session).query(request, new AsEventListener(listener)); } - private boolean shouldReverse(QueryRequest query) { + private static boolean shouldReverse(QueryRequest query) { SearchSourceBuilder searchSource = query.searchSource(); // since all results need to be ASC, use this hack to figure out whether the results need to be flipped for (SortBuilder sort : searchSource.sorts()) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/querydsl/container/FieldExtractorRegistry.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/querydsl/container/FieldExtractorRegistry.java index d15326e9da0ea..30b827c87603d 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/querydsl/container/FieldExtractorRegistry.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/querydsl/container/FieldExtractorRegistry.java @@ -34,7 +34,7 @@ public FieldExtraction compositeKeyExtraction(Expression expression) { return cache.computeIfAbsent(Expressions.id(expression), k -> createKeyExtractionFor(expression)); } - private FieldExtraction createKeyExtractionFor(Expression expression) { + private static FieldExtraction createKeyExtractionFor(Expression expression) { if (expression instanceof FieldAttribute fieldAttribute) { FieldAttribute fa = fieldAttribute.exactAttribute(); if (fa.isNested()) { @@ -48,7 +48,7 @@ private FieldExtraction createKeyExtractionFor(Expression expression) { throw new EqlIllegalArgumentException("Unsupported expression [{}]", expression); } - private FieldExtraction 
createFieldExtractionFor(Expression expression) { + private static FieldExtraction createFieldExtractionFor(Expression expression) { if (expression instanceof FieldAttribute fieldAttribute) { FieldAttribute fa = fieldAttribute.exactAttribute(); if (fa.isNested()) { @@ -66,7 +66,7 @@ private FieldExtraction createFieldExtractionFor(Expression expression) { throw new EqlIllegalArgumentException("Unsupported expression [{}]", expression); } - private FieldExtraction topHitFieldExtractor(FieldAttribute fieldAttr) { + private static FieldExtraction topHitFieldExtractor(FieldAttribute fieldAttr) { return new SearchHitFieldRef(fieldAttr.name(), fieldAttr.field().getDataType(), fieldAttr.field().isAlias()); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java index 2882d083532e8..ff64b4c3e2a0f 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java @@ -146,7 +146,7 @@ private LogicalPlan postAnalyze(LogicalPlan verified) { return postAnalyzer.postAnalyze(verified, configuration); } - private LogicalPlan doParse(String eql, ParserParams params) { + private static LogicalPlan doParse(String eql, ParserParams params) { return new EqlParser().createStatement(eql, params); } } diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java index 2073d2067445b..21f0e846ebfa6 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java @@ -156,7 +156,7 @@ public String getFeatureDescription() { return "Manages configuration for Fleet"; } - private SystemIndexDescriptor fleetActionsSystemIndexDescriptor() { + private static SystemIndexDescriptor fleetActionsSystemIndexDescriptor() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source(loadTemplateSource("/fleet-actions.json", FLEET_ACTIONS_MAPPINGS_VERSION), XContentType.JSON); @@ -174,7 +174,7 @@ private SystemIndexDescriptor fleetActionsSystemIndexDescriptor() { .build(); } - private SystemIndexDescriptor fleetAgentsSystemIndexDescriptor() { + private static SystemIndexDescriptor fleetAgentsSystemIndexDescriptor() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source(loadTemplateSource("/fleet-agents.json", FLEET_AGENTS_MAPPINGS_VERSION), XContentType.JSON); @@ -192,7 +192,7 @@ private SystemIndexDescriptor fleetAgentsSystemIndexDescriptor() { .build(); } - private SystemIndexDescriptor fleetEnrollmentApiKeysSystemIndexDescriptor() { + private static SystemIndexDescriptor fleetEnrollmentApiKeysSystemIndexDescriptor() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source( loadTemplateSource("/fleet-enrollment-api-keys.json", FLEET_ENROLLMENT_API_KEYS_MAPPINGS_VERSION), @@ -213,7 +213,7 @@ private SystemIndexDescriptor fleetEnrollmentApiKeysSystemIndexDescriptor() { .build(); } - private SystemIndexDescriptor fleetSecretsSystemIndexDescriptor() { + private static SystemIndexDescriptor fleetSecretsSystemIndexDescriptor() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source(loadTemplateSource("/fleet-secrets.json", FLEET_SECRETS_MAPPINGS_VERSION), XContentType.JSON); return 
SystemIndexDescriptor.builder() @@ -229,7 +229,7 @@ private SystemIndexDescriptor fleetSecretsSystemIndexDescriptor() { .build(); } - private SystemIndexDescriptor fleetPoliciesSystemIndexDescriptor() { + private static SystemIndexDescriptor fleetPoliciesSystemIndexDescriptor() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source(loadTemplateSource("/fleet-policies.json", FLEET_POLICIES_MAPPINGS_VERSION), XContentType.JSON); @@ -247,7 +247,7 @@ private SystemIndexDescriptor fleetPoliciesSystemIndexDescriptor() { .build(); } - private SystemIndexDescriptor fleetPoliciesLeaderSystemIndexDescriptor() { + private static SystemIndexDescriptor fleetPoliciesLeaderSystemIndexDescriptor() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source(loadTemplateSource("/fleet-policies-leader.json", FLEET_POLICIES_LEADER_MAPPINGS_VERSION), XContentType.JSON); @@ -265,7 +265,7 @@ private SystemIndexDescriptor fleetPoliciesLeaderSystemIndexDescriptor() { .build(); } - private SystemIndexDescriptor fleetServersSystemIndexDescriptors() { + private static SystemIndexDescriptor fleetServersSystemIndexDescriptors() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source(loadTemplateSource("/fleet-servers.json", FLEET_SERVERS_MAPPINGS_VERSION), XContentType.JSON); @@ -283,7 +283,7 @@ private SystemIndexDescriptor fleetServersSystemIndexDescriptors() { .build(); } - private SystemIndexDescriptor fleetArtifactsSystemIndexDescriptors() { + private static SystemIndexDescriptor fleetArtifactsSystemIndexDescriptors() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.source(loadTemplateSource("/fleet-artifacts.json", FLEET_ARTIFACTS_MAPPINGS_VERSION), XContentType.JSON); @@ -301,7 +301,7 @@ private SystemIndexDescriptor fleetArtifactsSystemIndexDescriptors() { .build(); } - private SystemDataStreamDescriptor fleetActionsResultsDescriptor() { + private static SystemDataStreamDescriptor fleetActionsResultsDescriptor() { final String source = loadTemplateSource("/fleet-actions-results.json", FLEET_ACTIONS_RESULTS_MAPPINGS_VERSION); try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, source)) { ComposableIndexTemplate composableIndexTemplate = ComposableIndexTemplate.parse(parser); @@ -357,7 +357,7 @@ public void cleanUpFeature(ClusterService clusterService, Client client, ActionL } } - private String loadTemplateSource(String resource, int mappingsVersion) { + private static String loadTemplateSource(String resource, int mappingsVersion) { return TemplateUtils.loadTemplate( resource, Version.CURRENT.toString(), diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsShardAction.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsShardAction.java index 8728c63d7df01..a43f0fe94e1b7 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsShardAction.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsShardAction.java @@ -206,7 +206,7 @@ private void globalCheckpointAdvanced(final ShardId shardId, final Request reque } } - private void globalCheckpointAdvancementFailure( + private static void globalCheckpointAdvancementFailure( final IndexShard indexShard, final Request request, final Exception e, diff --git 
a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java index 16a6b98964eab..db825182e4621 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/action/TransportGraphExploreAction.java @@ -550,7 +550,7 @@ private double getExpandTotalSignalStrength(Hop lastHop, Hop currentHop, Sampler }); } - private void addUserDefinedIncludesToQuery(Hop hop, BoolQueryBuilder sourceTermsOrClause) { + private static void addUserDefinedIncludesToQuery(Hop hop, BoolQueryBuilder sourceTermsOrClause) { for (int i = 0; i < hop.getNumberVertexRequests(); i++) { VertexRequest vr = hop.getVertexRequest(i); if (vr.hasIncludeClauses()) { @@ -559,7 +559,7 @@ private void addUserDefinedIncludesToQuery(Hop hop, BoolQueryBuilder sourceTerms } } - private void addBigOrClause(Map> lastHopFindings, BoolQueryBuilder sourceTermsOrClause) { + private static void addBigOrClause(Map> lastHopFindings, BoolQueryBuilder sourceTermsOrClause) { int numClauses = sourceTermsOrClause.should().size(); for (Entry> entry : lastHopFindings.entrySet()) { numClauses += entry.getValue().size(); @@ -752,7 +752,7 @@ private double getInitialTotalSignalStrength(Hop rootHop, Sampler sample) { } } - private void addNormalizedBoosts(BoolQueryBuilder includesContainer, VertexRequest vr) { + private static void addNormalizedBoosts(BoolQueryBuilder includesContainer, VertexRequest vr) { TermBoost[] termBoosts = vr.includeValues(); if ((includesContainer.should().size() + termBoosts.length) > BooleanQuery.getMaxClauseCount()) { diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 342d359d94d80..472b6f8087a56 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -126,7 +126,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> client.execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); } - private void parseHop(XContentParser parser, Hop currentHop, GraphExploreRequest graphRequest) throws IOException { + private static void parseHop(XContentParser parser, Hop currentHop, GraphExploreRequest graphRequest) throws IOException { String fieldName = null; XContentParser.Token token; @@ -163,7 +163,7 @@ private void parseHop(XContentParser parser, Hop currentHop, GraphExploreRequest } } - private void parseVertices(XContentParser parser, Hop currentHop) throws IOException { + private static void parseVertices(XContentParser parser, Hop currentHop) throws IOException { XContentParser.Token token; String fieldName = null; @@ -320,7 +320,7 @@ private void parseVertices(XContentParser parser, Hop currentHop) throws IOExcep } - private void parseControls(XContentParser parser, GraphExploreRequest graphRequest) throws IOException { + private static void parseControls(XContentParser parser, GraphExploreRequest graphRequest) throws IOException { XContentParser.Token token; String fieldName = null; diff --git 
a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java index 7a1e66b3c1e78..31bcf6f428fe2 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/action/TransportPutSamlServiceProviderAction.java @@ -153,7 +153,7 @@ private void writeDocument( ); } - private String deriveDocumentId(SamlServiceProviderDocument document) { + private static String deriveDocumentId(SamlServiceProviderDocument document) { final byte[] sha256 = MessageDigests.sha256().digest(document.entityId.getBytes(StandardCharsets.UTF_8)); return Base64.getUrlEncoder().withoutPadding().encodeToString(sha256); } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolver.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolver.java index 03e1351dbd782..4ee2b91c5f2f7 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolver.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/UserPrivilegeResolver.java @@ -104,7 +104,7 @@ public void resolve(ServiceProviderPrivileges service, ActionListener appPrivileges = response.getApplicationPrivileges().get(service.getApplicationName()); if (appPrivileges == null || appPrivileges.isEmpty()) { return UserPrivileges.noAccess(response.getUsername()); diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SamlAuthnRequestValidator.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SamlAuthnRequestValidator.java index 29b95263e07e8..5c83da57f84a8 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SamlAuthnRequestValidator.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SamlAuthnRequestValidator.java @@ -96,11 +96,11 @@ public void processQueryString(String queryString, ActionListener { try { validateAuthnRequest(authnRequest, sp, parsedQueryString, listener); @@ -180,7 +180,7 @@ private void validateAuthnRequest( org.elasticsearch.core.Strings.format( "Unable to validate signature of authentication request [%s] using credentials [%s]", parsedQueryString.queryString, - samlFactory.describeCredentials(spSigningCredentials) + SamlFactory.describeCredentials(spSigningCredentials) ), listener ); @@ -223,7 +223,7 @@ private void validateAuthnRequest( listener.onResponse(response); } - private void validateNameIdPolicy(AuthnRequest request, SamlServiceProvider sp, Map authnState) { + private static void validateNameIdPolicy(AuthnRequest request, SamlServiceProvider sp, Map authnState) { final NameIDPolicy nameIDPolicy = request.getNameIDPolicy(); if (null != nameIDPolicy) { final String requestedFormat = nameIDPolicy.getFormat(); @@ -246,7 +246,7 @@ private void validateNameIdPolicy(AuthnRequest request, SamlServiceProvider sp, } private boolean validateSignature(ParsedQueryString queryString, Collection credentials) { - final String javaSigAlgorithm = samlFactory.getJavaAlorithmNameFromUri(queryString.sigAlg); + final String javaSigAlgorithm = 
SamlFactory.getJavaAlorithmNameFromUri(queryString.sigAlg); final byte[] contentBytes = queryString.reconstructQueryParameters().getBytes(StandardCharsets.UTF_8); final byte[] signatureBytes = Base64.getDecoder().decode(queryString.signature); return credentials.stream().anyMatch(credential -> { @@ -264,7 +264,7 @@ private boolean validateSignature(ParsedQueryString queryString, Collection format("Signature verification failed for credential [%s]", samlFactory.describeCredentials(Set.of(credential))), + () -> format("Signature verification failed for credential [%s]", SamlFactory.describeCredentials(Set.of(credential))), e ); return false; @@ -303,7 +303,7 @@ private void checkDestination(AuthnRequest request) { } } - private String checkAcs(AuthnRequest request, SamlServiceProvider sp, Map authnState) { + private static String checkAcs(AuthnRequest request, SamlServiceProvider sp, Map authnState) { final String acs = request.getAssertionConsumerServiceURL(); if (Strings.hasText(acs) == false) { final String message = request.getAssertionConsumerServiceIndex() == null @@ -347,7 +347,7 @@ private byte[] decodeBase64(String content) { } } - private byte[] inflate(byte[] bytes) { + private static byte[] inflate(byte[] bytes) { Inflater inflater = new Inflater(true); try ( ByteArrayInputStream in = new ByteArrayInputStream(bytes); @@ -361,7 +361,7 @@ private byte[] inflate(byte[] bytes) { } } - private String urlEncode(String param) { + private static String urlEncode(String param) { return URLEncoder.encode(param, StandardCharsets.UTF_8); } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SuccessfulAuthenticationResponseMessageBuilder.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SuccessfulAuthenticationResponseMessageBuilder.java index 003eb3f2c1e75..c59d99c7171c4 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SuccessfulAuthenticationResponseMessageBuilder.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/authn/SuccessfulAuthenticationResponseMessageBuilder.java @@ -98,7 +98,7 @@ public Response build(UserServiceAuthentication user, @Nullable SamlAuthenticati private Response sign(Response response) { final SamlObjectSigner signer = new SamlObjectSigner(samlFactory, idp); - return samlFactory.buildXmlObject(signer.sign(response), Response.class); + return SamlFactory.buildXmlObject(signer.sign(response), Response.class); } private Conditions buildConditions(Instant now, SamlServiceProvider serviceProvider) { @@ -157,7 +157,7 @@ private AuthnStatement buildAuthnStatement(Instant now, UserServiceAuthenticatio return statement; } - private String resolveAuthnClass(Set authenticationMethods, Set networkControls) { + private static String resolveAuthnClass(Set authenticationMethods, Set networkControls) { if (authenticationMethods.contains(AuthenticationMethod.PASSWORD)) { if (networkControls.contains(NetworkControl.IP_FILTER)) { return AuthnContext.IP_PASSWORD_AUTHN_CTX; diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdPMetadataBuilder.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdPMetadataBuilder.java index c580087daf183..58a43307d9b0e 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdPMetadataBuilder.java +++ 
b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdPMetadataBuilder.java @@ -246,7 +246,7 @@ private Organization buildOrganization() { return org; } - private ContactPerson buildContact(SamlIdentityProvider.ContactInfo contact) { + private static ContactPerson buildContact(SamlIdentityProvider.ContactInfo contact) { final GivenName givenName = new GivenNameBuilder().buildObject(); givenName.setValue(contact.givenName); final SurName surName = new SurNameBuilder().buildObject(); diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java index c16f27229c5a2..ba2c971f0e0d9 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java @@ -163,7 +163,7 @@ public List<X509Certificate> getIdentityProviderX509MetadataSigningCertificates( return decodeCertificates(this.identityProviderMetadataSigning); } - private List<String> encodeCertificates(Collection<X509Certificate> certificates) { + private static List<String> encodeCertificates(Collection<X509Certificate> certificates) { return certificates == null ? List.of() : certificates.stream().map(cert -> { try { return cert.getEncoded(); @@ -173,14 +173,14 @@ private List<String> encodeCertificates(Collection<X509Certificate> certificates }).map(Base64.getEncoder()::encodeToString).toList(); } - private List<X509Certificate> decodeCertificates(List<String> encodedCertificates) { + private static List<X509Certificate> decodeCertificates(List<String> encodedCertificates) { if (encodedCertificates == null || encodedCertificates.isEmpty()) { return List.of(); } - return encodedCertificates.stream().map(this::decodeCertificate).toList(); + return encodedCertificates.stream().map(Certificates::decodeCertificate).toList(); } - private X509Certificate decodeCertificate(String base64Cert) { + private static X509Certificate decodeCertificate(String base64Cert) { final byte[] bytes = base64Cert.getBytes(StandardCharsets.UTF_8); try (InputStream stream = new ByteArrayInputStream(bytes)) { final List<Certificate> certificates = CertParsingUtils.readCertificates(Base64.getDecoder().wrap(stream)); diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderFactory.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderFactory.java index ec8fc728c61f3..25a3ae59f17fd 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderFactory.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderFactory.java @@ -89,7 +89,7 @@ private ServiceProviderPrivileges buildPrivileges(SamlServiceProviderDocument.Pr return new ServiceProviderPrivileges(defaults.applicationName, resource, roleMapping); } - private URL parseUrl(SamlServiceProviderDocument document) { + private static URL parseUrl(SamlServiceProviderDocument document) { final URL acs; try { acs = new URL(document.acs); diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java index ec3973a4b5781..558ac9a20dc0c 100644 ---
a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java @@ -332,7 +332,7 @@ private void findDocuments(QueryBuilder query, ActionListener } } - private SamlServiceProviderDocument toServiceProviderDocument(String serviceJson) throws IOException { + private static SamlServiceProviderDocument toServiceProviderDocument(String serviceJson) throws IOException { try (XContentParser docParser = parser(new BytesArray(serviceJson))) { return SamlServiceProviderDocument.fromXContent(null, docParser); } @@ -195,7 +195,7 @@ private static XContentParser parser(BytesReference body) throws IOException { return XContentHelper.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, body, XContentType.JSON); } - private String extractGroup(Matcher matcher, String name) { + private static String extractGroup(Matcher matcher, String name) { try { return matcher.group(name); } catch (IllegalArgumentException e) { diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlFactory.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlFactory.java index 59dfc0cefc913..a46bea16830f9 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlFactory.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlFactory.java @@ -78,7 +78,7 @@ public T object(Class type, QName elementName, QName sc return cast(type, elementName, obj); } - private T cast(Class type, QName elementName, XMLObject obj) { + private static T cast(Class type, QName elementName, XMLObject obj) { if (type.isInstance(obj)) { return type.cast(obj); } else { @@ -120,7 +120,7 @@ public String toString(Element element, boolean pretty) { } } - public T buildXmlObject(Element element, Class type) { + public static T buildXmlObject(Element element, Class type) { try { UnmarshallerFactory unmarshallerFactory = getUnmarshallerFactory(); Unmarshaller unmarshaller = unmarshallerFactory.getUnmarshaller(element); @@ -163,26 +163,14 @@ public String getXmlContent(SAMLObject object, boolean prettyPrint) { } } - public boolean elementNameMatches(Element element, String namespace, String localName) { + public static boolean elementNameMatches(Element element, String namespace, String localName) { return localName.equals(element.getLocalName()) && namespace.equals(element.getNamespaceURI()); } - public String text(Element dom, int length) { + public static String text(Element dom, int length) { return text(dom, length, 0); } - public String text(XMLObject xml, int prefixLength, int suffixLength) { - final Element dom = xml.getDOM(); - if (dom == null) { - return null; - } - return text(dom, prefixLength, suffixLength); - } - - public String text(XMLObject xml, int length) { - return text(xml, length, 0); - } - protected static String text(Element dom, int prefixLength, int suffixLength) { final String text = dom.getTextContent().trim(); @@ -202,7 +190,7 @@ protected static String text(Element dom, int prefixLength, int suffixLength) { } } - public String describeCredentials(Collection credentials) { + public static String describeCredentials(Collection credentials) { return credentials.stream().map(c -> { if (c == null) { return ""; @@ -221,7 +209,7 @@ public String describeCredentials(Collection 
credentials) }).collect(Collectors.joining(",")); } - public Element toDomElement(XMLObject object) { + public static Element toDomElement(XMLObject object) { try { return XMLObjectSupport.marshall(object); } catch (MarshallingException e) { @@ -230,7 +218,7 @@ public Element toDomElement(XMLObject object) { } @SuppressForbidden(reason = "This is the only allowed way to construct a Transformer") - public Transformer getHardenedXMLTransformer() throws TransformerConfigurationException { + public static Transformer getHardenedXMLTransformer() throws TransformerConfigurationException { final TransformerFactory tfactory = TransformerFactory.newInstance(); tfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true); tfactory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, ""); @@ -282,7 +270,7 @@ public static DocumentBuilder getHardenedBuilder(String[] schemaFiles) throws Pa return documentBuilder; } - public String getJavaAlorithmNameFromUri(String sigAlg) { + public static String getJavaAlorithmNameFromUri(String sigAlg) { return switch (sigAlg) { case "http://www.w3.org/2000/09/xmldsig#dsa-sha1" -> "SHA1withDSA"; case "http://www.w3.org/2000/09/xmldsig#dsa-sha256" -> "SHA256withDSA"; diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlObjectSigner.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlObjectSigner.java index fd4aca48a3b02..cf3c8627acb5c 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlObjectSigner.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/support/SamlObjectSigner.java @@ -41,7 +41,7 @@ public Element sign(SignableXMLObject object) { signature.setSignatureAlgorithm(SignatureConstants.ALGO_ID_SIGNATURE_RSA_SHA256); signature.setCanonicalizationAlgorithm(SignatureConstants.ALGO_ID_C14N_EXCL_OMIT_COMMENTS); object.setSignature(signature); - Element element = samlFactory.toDomElement(object); + Element element = SamlFactory.toDomElement(object); try { AccessController.doPrivileged((PrivilegedExceptionAction) () -> { try (RestorableContextClassLoader ignore = new RestorableContextClassLoader(SignerProvider.class)) { diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlMetadataGeneratorTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlMetadataGeneratorTests.java index 4395ada877337..b93e3fe9d7f7f 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlMetadataGeneratorTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlMetadataGeneratorTests.java @@ -117,7 +117,7 @@ public void testGenerateAndSignMetadata() throws Exception { SamlFactory factory = new SamlFactory(); SamlMetadataGenerator generator = new SamlMetadataGenerator(factory, idp); Element element = generator.possiblySignDescriptor(generator.buildEntityDescriptor(sp), signingCredential); - EntityDescriptor descriptor = factory.buildXmlObject(element, EntityDescriptor.class); + EntityDescriptor descriptor = SamlFactory.buildXmlObject(element, EntityDescriptor.class); Signature signature = descriptor.getSignature(); assertNotNull(signature); SAMLSignatureProfileValidator profileValidator = new SAMLSignatureProfileValidator(); diff --git 
a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/test/IdpSamlTestCase.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/test/IdpSamlTestCase.java index d80e5d373fe5c..aa489b5d14717 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/test/IdpSamlTestCase.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/test/IdpSamlTestCase.java @@ -145,7 +145,7 @@ protected T domElementToXmlObject(Element element, Class { AllocateAction allocateAction = (AllocateAction) action; diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java index ecba96432839b..204bebcff4499 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java @@ -103,7 +103,7 @@ public Collection getSystemIndexDescriptors(Settings sett ); } - private Settings getIndexSettings() { + private static Settings getIndexSettings() { return Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") @@ -111,7 +111,7 @@ private Settings getIndexSettings() { .build(); } - private XContentBuilder getIndexMappings() { + private static XContentBuilder getIndexMappings() { try { final XContentBuilder builder = jsonBuilder(); { diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineAction.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineAction.java index b8e5ea26d01f3..4c0e90ce97b1b 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineAction.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineAction.java @@ -46,7 +46,7 @@ protected void doExecute(Task task, DeletePipelineRequest request, ActionListene ); } - private void handleFailure(Exception e, ActionListener listener) { + private static void handleFailure(Exception e, ActionListener listener) { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof IndexNotFoundException) { listener.onResponse(new DeletePipelineResponse(false)); diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineAction.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineAction.java index d9cca1b59e58f..b4b9c4fabac54 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineAction.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineAction.java @@ -72,7 +72,7 @@ protected void doExecute(Task task, GetPipelineRequest request, ActionListener wildcardPipelinePatterns = request.ids() .stream() .filter(pipeline -> pipeline.contains(WILDCARD)) - .map(this::toWildcardPipelineIdPattern) + .map(TransportGetPipelineAction::toWildcardPipelineIdPattern) .map(Pattern::compile) .collect(Collectors.toSet()); @@ -129,7 +129,7 @@ private void getPipelinesByIds(Set ids, ActionListener handleFailure(e, listener))); } - private void handleFailure(Exception e, ActionListener listener) { + private static void handleFailure(Exception e, 
ActionListener<GetPipelineResponse> listener) { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof IndexNotFoundException) { listener.onResponse(new GetPipelineResponse(Map.of())); @@ -202,7 +202,7 @@ private void handleFilteringSearchResponse( } } - private void logFailures(MultiGetResponse multiGetResponse) { + private static void logFailures(MultiGetResponse multiGetResponse) { List<String> ids = Arrays.stream(multiGetResponse.getResponses()) .filter(MultiGetItemResponse::isFailed) .filter(itemResponse -> itemResponse.getFailure() != null) @@ -213,7 +213,7 @@ private void logFailures(MultiGetResponse multiGetResponse) { } } - private String toWildcardPipelineIdPattern(String wildcardPipelineId) { + private static String toWildcardPipelineIdPattern(String wildcardPipelineId) { Matcher matcher = WILDCARD_PATTERN.matcher(wildcardPipelineId); StringBuilder stringBuilder = new StringBuilder(); while (matcher.find()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index d2d6bd4fcb443..323f2661d1098 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -868,7 +868,7 @@ private void disallowMlNodeAttributes(String... mlNodeAttributes) { } } - private void reportClashingNodeAttribute(String attrName) { + private static void reportClashingNodeAttribute(String attrName) { throw new IllegalArgumentException( "Directly setting [" + attrName diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java index d16a445133ccb..76710b8eb16c6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningUsageTransportAction.java @@ -249,7 +249,7 @@ private void addJobsUsage(GetJobsStatsAction.Response response, List<Job> jobs, List<GetJobsStatsAction.Response.JobStats> jobsStats = response.getResponse().results(); Map<String, Job> jobMap = jobs.stream().collect(Collectors.toMap(Job::getId, item -> item)); Map<String, Long> allJobsCreatedBy = jobs.stream() - .map(this::jobCreatedBy) + .map(MachineLearningUsageTransportAction::jobCreatedBy) .collect(Collectors.groupingBy(item -> item, Collectors.counting())); ; for (GetJobsStatsAction.Response.JobStats jobStats : jobsStats) { @@ -294,7 +294,7 @@ private void addJobsUsage(GetJobsStatsAction.Response response, List<Job> jobs, } } - private String jobCreatedBy(Job job) { + private static String jobCreatedBy(Job job) { Map<String, Object> customSettings = job.getCustomSettings(); if (customSettings == null || customSettings.containsKey(MachineLearningFeatureSetUsage.CREATED_BY) == false) { return "unknown"; @@ -304,7 +304,7 @@ private String jobCreatedBy(Job job) { return customSettings.get(MachineLearningFeatureSetUsage.CREATED_BY).toString().replaceAll("\\W", "_"); } - private Map<String, Object> createJobUsageEntry( + private static Map<String, Object> createJobUsageEntry( long count, StatsAccumulator detectorStats, StatsAccumulator modelSizeStats, @@ -320,7 +320,7 @@ private Map<String, Object> createJobUsageEntry( return usage; } - private void addDatafeedsUsage(GetDatafeedsStatsAction.Response response, Map<String, Object> datafeedsUsage) { + private static void addDatafeedsUsage(GetDatafeedsStatsAction.Response response, Map<String, Object> datafeedsUsage) { Map<DatafeedState, Long> datafeedCountByState = new HashMap<>(); List<GetDatafeedsStatsAction.Response.DatafeedStats>
datafeedsStats = response.getResponse().results(); @@ -337,13 +337,13 @@ private void addDatafeedsUsage(GetDatafeedsStatsAction.Response response, Map createCountUsageEntry(long count) { + private static Map createCountUsageEntry(long count) { Map usage = new HashMap<>(); usage.put(MachineLearningFeatureSetUsage.COUNT, count); return usage; } - private void addDataFrameAnalyticsStatsUsage( + private static void addDataFrameAnalyticsStatsUsage( GetDataFrameAnalyticsStatsAction.Response response, Map dataframeAnalyticsUsage ) { @@ -371,7 +371,10 @@ private void addDataFrameAnalyticsStatsUsage( } } - private void addDataFrameAnalyticsUsage(GetDataFrameAnalyticsAction.Response response, Map dataframeAnalyticsUsage) { + private static void addDataFrameAnalyticsUsage( + GetDataFrameAnalyticsAction.Response response, + Map dataframeAnalyticsUsage + ) { Map perAnalysisTypeCounterMap = new HashMap<>(); for (DataFrameAnalyticsConfig config : response.getResources().results()) { @@ -405,7 +408,7 @@ private void addInferenceUsage(ActionListener> listener) { } } - private void addDeploymentStats(GetTrainedModelsStatsAction.Response statsResponse, Map inferenceUsage) { + private static void addDeploymentStats(GetTrainedModelsStatsAction.Response statsResponse, Map inferenceUsage) { StatsAccumulator modelSizes = new StatsAccumulator(); int deploymentsCount = 0; double avgTimeSum = 0.0; @@ -442,7 +445,7 @@ private void addDeploymentStats(GetTrainedModelsStatsAction.Response statsRespon ); } - private void addTrainedModelStats( + private static void addTrainedModelStats( GetTrainedModelsAction.Response modelsResponse, GetTrainedModelsStatsAction.Response statsResponse, Map inferenceUsage @@ -492,7 +495,7 @@ private void addTrainedModelStats( } // TODO separate out ours and users models possibly regression vs classification - private void addInferenceIngestUsage(GetTrainedModelsStatsAction.Response statsResponse, Map inferenceUsage) { + private static void addInferenceIngestUsage(GetTrainedModelsStatsAction.Response statsResponse, Map inferenceUsage) { int pipelineCount = 0; StatsAccumulator docCountStats = new StatsAccumulator(); StatsAccumulator timeStats = new StatsAccumulator(); @@ -517,7 +520,7 @@ private void addInferenceIngestUsage(GetTrainedModelsStatsAction.Response statsR inferenceUsage.put("ingest_processors", Collections.singletonMap(MachineLearningFeatureSetUsage.ALL, ingestUsage)); } - private Map getMinMaxSumAsLongsFromStats(StatsAccumulator stats) { + private static Map getMinMaxSumAsLongsFromStats(StatsAccumulator stats) { Map asMap = Maps.newMapWithExpectedSize(3); asMap.put("sum", Double.valueOf(stats.getTotal()).longValue()); asMap.put("min", Double.valueOf(stats.getMin()).longValue()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index 409c1e5d24fcd..3df836e5f9043 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -75,7 +75,7 @@ public class MlAssignmentNotifier implements ClusterStateListener { clusterService.addListener(this); } - private String executorName() { + private static String executorName() { return ThreadPool.Names.GENERIC; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 178853ad4fa5b..62029e5e9cb98 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -530,7 +530,7 @@ public void onFailure(Exception e) { } } - private void sendResponseOrFailure( + private static void sendResponseOrFailure( String jobId, ActionListener listener, AtomicArray failures diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java index 407c8cc18ae22..e7afc79bd3644 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteCalendarAction.java @@ -80,7 +80,7 @@ protected void doExecute(Task task, DeleteCalendarAction.Request request, Action jobResultsProvider.calendar(calendarId, calendarListener); } - private DeleteByQueryRequest buildDeleteByQuery(String calendarId) { + private static DeleteByQueryRequest buildDeleteByQuery(String calendarId) { DeleteByQueryRequest request = new DeleteByQueryRequest(MlMetaIndex.indexName()); request.setSlices(AbstractBulkByScrollRequest.AUTO_SLICES); request.setRefresh(true); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java index ead2d8cda30b3..10679f447db15 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java @@ -115,7 +115,7 @@ private void forceDelete( stopJob(parentTaskClient, request, stopListener); } - private void stopJob( + private static void stopJob( ParentTaskAssigningClient parentTaskClient, DeleteDataFrameAnalyticsAction.Request request, ActionListener listener diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java index c8da075bc20db..11ad47c00ebd1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java @@ -216,7 +216,7 @@ private static Tuple getStatusAndReason(final BulkByScrol return new Tuple<>(status, reason); } - private DeleteByQueryRequest buildDeleteByQuery(String jobId, List forecastsToDelete) { + private static DeleteByQueryRequest buildDeleteByQuery(String jobId, List forecastsToDelete) { BoolQueryBuilder innerBoolQuery = QueryBuilders.boolQuery() .must( QueryBuilders.termsQuery( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 6057493c95289..7842af8b12993 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -280,7 +280,7 @@ private void forceDeleteJob( killProcess(parentTaskClient, jobId, killJobListener); } - private void killProcess( + private static void killProcess( ParentTaskAssigningClient parentTaskClient, String jobId, ActionListener listener @@ -300,7 +300,7 @@ private void removePersistentTask(String jobId, ClusterState currentState, Actio } } - private void checkJobIsNotOpen(String jobId, ClusterState state) { + private static void checkJobIsNotOpen(String jobId, ClusterState state) { PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask != null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index 2827967c42cd5..b1d799cd33622 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -322,7 +322,7 @@ static TrainedModelAssignment pickAssignment(List assign throw new IllegalStateException(); } - private ActionListener collectingListener( + private static ActionListener collectingListener( AtomicInteger count, AtomicArray> results, AtomicReference failure, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java index fe4a341051ee5..6a67f942c0f19 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java @@ -115,7 +115,7 @@ private ByteSizeValue defaultModelMemoryLimit() { return defaultLimit; } - private Map datafeedsDefaults() { + private static Map datafeedsDefaults() { Map anomalyDetectorsDefaults = new HashMap<>(); anomalyDetectorsDefaults.put(DatafeedConfig.SCROLL_SIZE.getPreferredName(), DatafeedConfig.DEFAULT_SCROLL_SIZE); return anomalyDetectorsDefaults; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java index 81287ce749d83..5450b2752ab97 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java @@ -259,7 +259,7 @@ private void revertSnapshot( }, listener::onFailure); } - private void getModelSnapshot( + private static void getModelSnapshot( RevertModelSnapshotAction.Request request, JobResultsProvider provider, Consumer handler, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index ab215106c8ed0..24f3ef90ad76d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -340,7 +340,7 @@ private void validateSourceIndexHasAnalyzableData(StartContext startContext, Act validateSourceIndexHasAtLeastOneAnalyzedField(startContext, validateAtLeastOneAnalyzedFieldListener); } - private void validateSourceIndexHasAtLeastOneAnalyzedField(StartContext startContext, ActionListener listener) { + private static void validateSourceIndexHasAtLeastOneAnalyzedField(StartContext startContext, ActionListener listener) { Set requiredFields = startContext.config.getAnalysis() .getRequiredFields() .stream() @@ -415,7 +415,7 @@ private void getProgress(DataFrameAnalyticsConfig config, ActionListener listener diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 475ca4ef2a7ce..b02f6339e49c0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -431,7 +431,7 @@ public void onFailure(Exception e) { }); } - private ElasticsearchStatusException createUnlicensedError( + private static ElasticsearchStatusException createUnlicensedError( final String datafeedId, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck ) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java index f339376d97a03..961331c33c2ce 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java @@ -290,7 +290,7 @@ private void forceStop( } } - private void sendResponseOrFailure( + private static void sendResponseOrFailure( String analyticsId, ActionListener listener, AtomicArray failures diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 614fcfaed6120..2c9668a504b55 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -456,7 +456,7 @@ protected void doRun() { })); } - private void sendResponseOrFailure( + private static void sendResponseOrFailure( String datafeedId, ActionListener listener, AtomicArray failures diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java index f6affc3d12340..1a54533ae9262 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java @@ -240,7 +240,7 @@ protected ImmutableTransactionStore mapFinalize(HashBasedTransactionStore transa /** * rewrites items that use ordinals to their real values */ - private ImmutableTransactionStore rewriteOrdinalItems( + private 
static ImmutableTransactionStore rewriteOrdinalItems( ImmutableTransactionStore transactionStore, List ordinalLookups ) throws IOException { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java index 5cb9bf543fd19..324da870b1a40 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScore.java @@ -196,7 +196,7 @@ public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long return FastMath.max(-FastMath.log(FastMath.max(pValue, Double.MIN_NORMAL)), 0.0); } - private double eps(double value) { + private static double eps(double value) { return Math.max(0.05 * value + 0.5, 1.0); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java index 9dc0604bb2e26..5605a80a7454c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java @@ -108,7 +108,10 @@ public MlProcessorAutoscalingCapacity scale( .build(); } - private boolean hasUnsatisfiedDeployments(TrainedModelAssignmentMetadata trainedModelAssignmentMetadata, List mlNodes) { + private static boolean hasUnsatisfiedDeployments( + TrainedModelAssignmentMetadata trainedModelAssignmentMetadata, + List mlNodes + ) { final Set mlNodeIds = mlNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); return trainedModelAssignmentMetadata.allAssignments() .values() @@ -117,7 +120,9 @@ private boolean hasUnsatisfiedDeployments(TrainedModelAssignmentMetadata trained .anyMatch(deployment -> deployment.isSatisfied(mlNodeIds) == false); } - private MlProcessorAutoscalingCapacity.Builder computeRequiredCapacity(TrainedModelAssignmentMetadata trainedModelAssignmentMetadata) { + private static MlProcessorAutoscalingCapacity.Builder computeRequiredCapacity( + TrainedModelAssignmentMetadata trainedModelAssignmentMetadata + ) { int maxThreadsPerAllocation = 0; double processorCount = 0; boolean hasLowPriorityDeployments = false; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index d4757d5be9ce5..b32bdf6dcbccf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -482,7 +482,7 @@ private DataCounts postData(InputStream inputStream, XContentType xContentType) } } - private boolean isConflictException(Exception e) { + private static boolean isConflictException(Exception e) { return e instanceof ElasticsearchStatusException && ((ElasticsearchStatusException) e).status() == RestStatus.CONFLICT; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index ef8b589735585..bdc143c7dde4c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -272,7 +272,7 @@ public void deleteDatafeed(DeleteDatafeedAction.Request request, ClusterState st } - private PersistentTasksCustomMetadata.PersistentTask getDatafeedTask(ClusterState state, String datafeedId) { + private static PersistentTasksCustomMetadata.PersistentTask getDatafeedTask(ClusterState state, String datafeedId) { PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); return MlTasks.getDatafeedTask(datafeedId, tasks); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java index 799d884342226..99ad92ee2b91e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java @@ -356,7 +356,7 @@ private String getJobIdIfDatafeedRunningOnThisNode(TransportStartDatafeedAction. return holder.getJobId(); } - private JobState getJobState(PersistentTasksCustomMetadata tasks, String jobId) { + private static JobState getJobState(PersistentTasksCustomMetadata tasks, String jobId) { return MlTasks.getJobStateModifiedForReassignments(jobId, tasks); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java index cb27cfe28a9bd..0d4ba9fd2086d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java @@ -157,7 +157,7 @@ private SearchSourceBuilder buildBaseSearchSource() { protected abstract T buildSearchRequest(SearchSourceBuilder searchRequestBuilder); - private Aggregations validateAggs(@Nullable Aggregations aggs) { + private static Aggregations validateAggs(@Nullable Aggregations aggs) { if (aggs == null) { return null; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java index 35e5ac4df0de1..ea70418a0c6a3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java @@ -300,7 +300,7 @@ private Collection processCompositeAggBucketKeys(Map buc * Date Histograms have a {@link ZonedDateTime} object as the key, * Histograms have either a Double or Long. 
*/ - private long toHistogramKeyToEpoch(Object key) { + private static long toHistogramKeyToEpoch(Object key) { if (key instanceof ZonedDateTime) { return ((ZonedDateTime) key).toInstant().toEpochMilli(); } else if (key instanceof Double) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index 316e5707a48dd..211fd8743f49c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -520,7 +520,7 @@ public void expandDatafeedConfigs( } - private QueryBuilder buildDatafeedIdQuery(String[] tokens) { + private static QueryBuilder buildDatafeedIdQuery(String[] tokens) { QueryBuilder datafeedQuery = new TermQueryBuilder(DatafeedConfig.CONFIG_TYPE.getPreferredName(), DatafeedConfig.TYPE); if (Strings.isAllOrWildcard(tokens)) { // match all @@ -555,7 +555,7 @@ static Collection matchingDatafeedIdsWithTasks(String[] datafeedIdPatter return MlStrings.findMatching(datafeedIdPatterns, MlTasks.startedDatafeedIds(tasksMetadata)); } - private QueryBuilder buildDatafeedJobIdsQuery(Collection jobIds) { + private static QueryBuilder buildDatafeedJobIdsQuery(Collection jobIds) { BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); boolQueryBuilder.filter(new TermQueryBuilder(DatafeedConfig.CONFIG_TYPE.getPreferredName(), DatafeedConfig.TYPE)); boolQueryBuilder.filter(new TermsQueryBuilder(Job.ID.getPreferredName(), jobIds)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java index 618c0ec95ee01..e54fd50936ece 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetector.java @@ -249,7 +249,7 @@ private void addExcludedField(String field, String reason, Set f fieldSelection.add(FieldSelection.excluded(field, getMappingTypes(field), reason)); } - private void addExcludedNestedPattern(String pattern, Set fieldSelection) { + private static void addExcludedNestedPattern(String pattern, Set fieldSelection) { fieldSelection.add( FieldSelection.excluded(pattern, Collections.singleton(NestedObjectMapper.CONTENT_TYPE), "nested fields are not supported") ); @@ -344,7 +344,7 @@ private void includeAndExcludeFields(Set fields, Set fie } } - private Set expandFields(String[] fields, Set nameset, boolean allowNoMatch) { + private static Set expandFields(String[] fields, Set nameset, boolean allowNoMatch) { NameResolver nameResolver = NameResolver.newUnaliased( nameset, (ex) -> new ResourceNotFoundException(Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER, ex)) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java index 13d233fd617bc..db68f49b78429 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java @@ -144,7 +144,7 @@ private void getCardinalitiesForFieldsWithConstraints( ClientHelper.executeWithHeadersAsync(config.getHeaders(), ML_ORIGIN, client, SearchAction.INSTANCE, searchRequest, searchListener); } - private void buildFieldCardinalitiesMap( + private static void buildFieldCardinalitiesMap( DataFrameAnalyticsConfig config, SearchResponse searchResponse, ActionListener> listener diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java index ed738acc2e63d..de306b2ece1a2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java @@ -235,8 +235,11 @@ private void processData( } } - private void writeDataRows(DataFrameDataExtractor dataExtractor, AnalyticsProcess process, DataFrameAnalyticsTask task) - throws IOException { + private static void writeDataRows( + DataFrameDataExtractor dataExtractor, + AnalyticsProcess process, + DataFrameAnalyticsTask task + ) throws IOException { ProgressTracker progressTracker = task.getStatsHolder().getProgressTracker(); DataCountsTracker dataCountsTracker = task.getStatsHolder().getDataCountsTracker(); @@ -270,7 +273,7 @@ private void writeDataRows(DataFrameDataExtractor dataExtractor, AnalyticsProces } } - private void writeHeaderRecord( + private static void writeHeaderRecord( DataFrameDataExtractor dataExtractor, AnalyticsProcess process, DataFrameAnalyticsTask task diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java index f4b95ddbd451f..ee91b0637bfc7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoiner.java @@ -118,7 +118,7 @@ private void executeBulkRequest(BulkRequest bulkRequest) { ); } - private void checkChecksumsMatch(DataFrameDataExtractor.Row row, RowResults result) { + private static void checkChecksumsMatch(DataFrameDataExtractor.Row row, RowResults result) { if (row.getChecksum() != result.getChecksum()) { String msg = "Detected checksum mismatch for document with id [" + row.getHit().getId() + "]; "; msg += "expected [" + row.getChecksum() + "] but result had [" + result.getChecksum() + "]; "; @@ -186,7 +186,7 @@ public DataFrameDataExtractor.Row next() { return row; } - private boolean hasNoMatch(DataFrameDataExtractor.Row row) { + private static boolean hasNoMatch(DataFrameDataExtractor.Row row) { return row == null || row.shouldSkip() || row.isTraining() == false; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoPointField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoPointField.java index 03baf1528ee1e..9edc72ca38f73 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoPointField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoPointField.java @@ -40,7 +40,7 @@ public Object[] value(SearchHit hit) { return value; } - private String 
handleString(String geoString) { + private static String handleString(String geoString) { if (geoString.contains(",")) { // Entry is of the form "38.897676, -77.03653" return geoString.replace(" ", ""); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoShapeField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoShapeField.java index 2c58d2b1be245..d7b8827add05a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoShapeField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/extractor/GeoShapeField.java @@ -54,7 +54,7 @@ public Object[] value(SearchHit hit) { return value; } - private String handleString(String geoString) { + private static String handleString(String geoString) { try { if (geoString.startsWith("POINT")) { // Entry is of the form "POINT (-77.03653 38.897676)" Geometry geometry = WellKnownText.fromWKT(StandardValidator.instance(true), true, geoString); @@ -71,7 +71,7 @@ private String handleString(String geoString) { } } - private String handleObject(Map geoObject) { + private static String handleObject(Map geoObject) { String geoType = (String) geoObject.get("type"); if (geoType != null && "point".equals(geoType.toLowerCase(Locale.ROOT))) { @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index fe8366065dac7..44ead5ca35dd3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -603,7 +603,7 @@ private void updateNumberOfAllocations(TrainedModelAssignmentMetadata assignment } } - private boolean hasStartingAssignments(TrainedModelAssignment assignment) { + private static boolean hasStartingAssignments(TrainedModelAssignment assignment) { return assignment.getNodeRoutingTable() .values() .stream() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index f9e9ebed5acc4..e1241dc8a93c3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -107,7 +107,7 @@ AssignmentPlan computeAssignmentPlan() { return mergePlans(nodesByZone, planForNormalPriorityModels, planForLowPriorityModels); } - private AssignmentPlan mergePlans( + private static AssignmentPlan mergePlans( Map, List> nodesByZone, AssignmentPlan planForNormalPriorityModels, AssignmentPlan planForLowPriorityModels @@ -249,7 +249,7 @@ private AssignmentPlan computePlanForLowPriorityModels(Set assignableNod return new AssignmentPlanner(planNodes, planDeployments).computePlan(); } - private Map findFittingAssignments( + private static Map findFittingAssignments( TrainedModelAssignment assignment, Set assignableNodeIds, Map remainingNodeMemory diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducer.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducer.java index 4dbc13fb233e0..a8b58b04ac22e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AllocationReducer.java @@ -132,7 +132,7 @@ public TrainedModelAssignment.Builder reduceTo(int numberOfAllocations) { return buildUpdatedAssignment(numberOfAllocations, allocationsByNode); } - private boolean canAssignmentBeRemovedEntirely( + private static boolean canAssignmentBeRemovedEntirely( Map.Entry assignment, int minAllocationsInOtherZones, int zoneAllocations, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java index 985d012de5f95..73b713cced32a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java @@ -164,7 +164,7 @@ private AssignmentPlan solvePreservingAllAllocationsOnCurrentAssignments() { return solvePreservingCurrentAssignments(new PreserveAllAllocations(nodes, deployments)); } - private AssignmentPlan solvePreservingCurrentAssignments(AbstractPreserveAllocations preserveAllocations) { + private static AssignmentPlan solvePreservingCurrentAssignments(AbstractPreserveAllocations preserveAllocations) { List planNodes = preserveAllocations.nodesPreservingAllocations(); List planDeployments = preserveAllocations.modelsPreservingAllocations(); logger.trace(() -> format("Nodes after applying allocation preserving strategy = %s", planNodes)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java index 742bef2852caa..90c5a2257d94d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java @@ -200,11 +200,11 @@ private static int distance(int x, int y) { return distance == Integer.MIN_VALUE ? Integer.MAX_VALUE : Math.abs(distance); } - private double minWeight(Deployment m, Node n, double w) { + private static double minWeight(Deployment m, Node n, double w) { return m.currentAllocationsByNodeId().containsKey(n.id()) ? w / 2 : 0; } - private double maxWeight(Deployment m, Node n, double w) { + private static double maxWeight(Deployment m, Node n, double w) { return m.currentAllocationsByNodeId().containsKey(n.id()) ? 
w : w / 2; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java index 99007110c8a50..dafc07099f850 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java @@ -194,7 +194,7 @@ private void assignExcessCores(Node n) { // assign any extra cores this node has to the models in descending size order. for (AssignmentPlan.Deployment m : deployments.stream() .filter(m -> assignments.get(Tuple.tuple(m, n)) == 1 && resourceTracker.remainingModelAllocations.get(m) > 0) - .sorted(Comparator.comparingDouble(this::remainingModelOrder)) + .sorted(Comparator.comparingDouble(AssignmentHolder::remainingModelOrder)) .toList()) { if (resourceTracker.remainingNodeCores.get(n) <= 0) { break; @@ -210,7 +210,7 @@ private void assignExcessCores(Node n) { zeroSoftAssignmentsOfSatisfiedModels(); } - private double remainingModelOrder(AssignmentPlan.Deployment m) { + private static double remainingModelOrder(AssignmentPlan.Deployment m) { return (m.currentAllocationsByNodeId().isEmpty() ? 1 : 2) * -m.memoryBytes(); } @@ -334,7 +334,7 @@ private Map, Integer> tryAssigningRemaini for (Deployment m : deployments.stream() .filter(m -> resourceTracker.remainingModelAllocations.get(m) > 0) - .sorted(Comparator.comparingDouble(this::remainingModelOrder)) + .sorted(Comparator.comparingDouble(AssignmentHolder::remainingModelOrder)) .toList()) { for (Node n : nodes.stream() .filter( @@ -369,7 +369,7 @@ private Map, Integer> tryAssigningRemaini return resultAllocations; } - private double remainingNodeOrder( + private static double remainingNodeOrder( Node n, AssignmentPlan.Deployment m, int remainingNodeCores, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessor.java index a0c8ddcba5125..599b3c90204ef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/NerProcessor.java @@ -88,7 +88,7 @@ public String toString() { /** * Checks labels are valid entity tags and none are duplicated */ - private void validate(List classificationLabels) { + private static void validate(List classificationLabels) { if (classificationLabels == null || classificationLabels.isEmpty()) { return; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java index 93b3c1f9b35fb..c6eca511b895e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/tokenizers/PrecompiledCharMapNormalizer.java @@ -87,19 +87,19 @@ public PrecompiledCharMapNormalizer(int[] offsets, String normalizedStr, Reader this.normalizedStrUtf8Bytes = normalizedStr.getBytes(StandardCharsets.UTF_8); } - private boolean hasLeaf(int v) { + private static boolean hasLeaf(int v) { return ((v 
>>> 8) & 1) == 1; } - private int label(int v) { + private static int label(int v) { return (v & ((1 << 31) | 0xFF)); } - private int value(int v) { + private static int value(int v) { return (v & ((1 << 31) - 1)); } - private int offset(int v) { + private static int offset(int v) { return (v >>> 10) << ((v & (1 << 9)) >>> 6); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java index 297ecbd86d2ac..b704dc37bfc22 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -1045,7 +1045,7 @@ public void expandIds( public void getInferenceStats(String[] modelIds, @Nullable TaskId parentTaskId, ActionListener> listener) { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); - Arrays.stream(modelIds).map(this::buildStatsSearchRequest).forEach(multiSearchRequest::add); + Arrays.stream(modelIds).map(TrainedModelProvider::buildStatsSearchRequest).forEach(multiSearchRequest::add); if (multiSearchRequest.requests().isEmpty()) { listener.onResponse(Collections.emptyList()); return; @@ -1103,7 +1103,7 @@ public void getInferenceStats(String[] modelIds, @Nullable TaskId parentTaskId, ); } - private SearchRequest buildStatsSearchRequest(String modelId) { + private static SearchRequest buildStatsSearchRequest(String modelId) { BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(InferenceStats.MODEL_ID.getPreferredName(), modelId)) .filter(QueryBuilders.termQuery(InferenceStats.TYPE.getPreferredName(), InferenceStats.NAME)); @@ -1136,7 +1136,7 @@ private SearchRequest buildStatsSearchRequest(String modelId) { ); } - private InferenceStats handleMultiNodeStatsResponse(SearchResponse response, String modelId) { + private static InferenceStats handleMultiNodeStatsResponse(SearchResponse response, String modelId) { if (response.getAggregations() == null) { logger.trace(() -> "[" + modelId + "] no previously stored stats found"); return null; @@ -1245,7 +1245,7 @@ private static QueryBuilder buildQueryIdExpressionQuery(String[] tokens, String return boolQuery; } - private Set matchedResourceIds(String[] tokens) { + private static Set matchedResourceIds(String[] tokens) { if (Strings.isAllOrWildcard(tokens)) { return MODELS_STORED_AS_RESOURCE; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java index 4b925464d985b..5636762871b23 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java @@ -266,7 +266,7 @@ public synchronized ResultStats getResultStats() { ); } - private LongSummaryStatistics cloneSummaryStats(LongSummaryStatistics stats) { + private static LongSummaryStatistics cloneSummaryStats(LongSummaryStatistics stats) { return new LongSummaryStatistics(stats.getCount(), stats.getMin(), stats.getMax(), stats.getSum()); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java index 7da8591c005f6..82c9131325eee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java @@ -112,7 +112,7 @@ private boolean writeChunk(TrainedModelDefinitionDoc doc, OutputStream outputStr return true; } - private long writeModelSize(String modelId, Long modelSizeBytes, OutputStream outputStream) throws IOException { + private static long writeModelSize(String modelId, Long modelSizeBytes, OutputStream outputStream) throws IOException { if (modelSizeBytes == null) { String message = String.format( Locale.ROOT, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 14a9474a0c787..035f4864ebace 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -540,13 +540,13 @@ private void auditJobUpdatedIfNotInternal(UpdateJobAction.Request request) { } } - private boolean isJobOpen(ClusterState clusterState, String jobId) { + private static boolean isJobOpen(ClusterState clusterState, String jobId) { PersistentTasksCustomMetadata persistentTasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); JobState jobState = MlTasks.getJobState(jobId, persistentTasks); return jobState == JobState.OPENED; } - private Set openJobIds(ClusterState clusterState) { + private static Set openJobIds(ClusterState clusterState) { PersistentTasksCustomMetadata persistentTasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); return MlTasks.openJobIds(persistentTasks); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java index 3994424ac0b6f..548c95d1ddd50 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java @@ -136,7 +136,10 @@ private void updateLoadGivenTasks(NodeLoad.Builder nodeLoad, PersistentTasksCust } } - private void updateLoadGivenModelAssignments(NodeLoad.Builder nodeLoad, TrainedModelAssignmentMetadata trainedModelAssignmentMetadata) { + private static void updateLoadGivenModelAssignments( + NodeLoad.Builder nodeLoad, + TrainedModelAssignmentMetadata trainedModelAssignmentMetadata + ) { if (trainedModelAssignmentMetadata != null && trainedModelAssignmentMetadata.allAssignments().isEmpty() == false) { for (TrainedModelAssignment assignment : trainedModelAssignmentMetadata.allAssignments().values()) { if (Optional.ofNullable(assignment.getNodeRoutingTable().get(nodeLoad.getNodeId())) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index eababb5fab02c..4c71f8fbce139 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -747,7 +747,7 @@ static Collection 
matchingJobIdsWithTasks(String[] jobIdPatterns, Persis return MlStrings.findMatching(jobIdPatterns, MlTasks.openJobIds(tasksMetadata)); } - private void parseJobLenientlyFromSource(BytesReference source, ActionListener jobListener) { + private static void parseJobLenientlyFromSource(BytesReference source, ActionListener jobListener) { try ( InputStream stream = source.streamInput(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) @@ -759,7 +759,7 @@ private void parseJobLenientlyFromSource(BytesReference source, ActionListener aliases = new HashSet<>(); List indices = new ArrayList<>(); for (var entry : getAliasesResponse.getAliases().entrySet()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java index 0690a225608eb..3c0d2aca4deda 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobRenormalizedResultsPersister.java @@ -84,7 +84,7 @@ public void updateResult(String id, String index, ToXContent resultDoc) { } } - private XContentBuilder toXContentBuilder(ToXContent obj) throws IOException { + private static XContentBuilder toXContentBuilder(ToXContent obj) throws IOException { XContentBuilder builder = jsonBuilder(); obj.toXContent(builder, ToXContent.EMPTY_PARAMS); return builder; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index f09b3166964f5..1144b1afffdcb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -1010,7 +1010,7 @@ void setJobState(JobTask jobTask, JobState state, String reason) { ); } - private void logSetJobStateFailure(JobState state, String jobId, Exception e) { + private static void logSetJobStateFailure(JobState state, String jobId, Exception e) { if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { logger.debug("Could not set job state to [{}] for job [{}] as it has been closed", state, jobId); } else { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java index df4ad9f1cac41..4fd2e98b16529 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/FlushJobParams.java @@ -186,14 +186,14 @@ private void checkValidFlushArgumentsCombination() { } } - private Long parseTimeParam(String name, String value) { + private static Long parseTimeParam(String name, String value) { if (Strings.isNullOrEmpty(value)) { return null; } return paramToEpochIfValidOrThrow(name, value) / TimeRange.MILLISECONDS_IN_SECOND; } - private long paramToEpochIfValidOrThrow(String paramName, String date) { + private static long paramToEpochIfValidOrThrow(String paramName, String date) { if (TimeRange.NOW.equals(date)) { 
return System.currentTimeMillis(); } @@ -208,14 +208,14 @@ private long paramToEpochIfValidOrThrow(String paramName, String date) { return epoch; } - private void checkFlushParamIsEmpty(String paramName, String paramValue) { + private static void checkFlushParamIsEmpty(String paramName, String paramValue) { if (paramValue.isEmpty() == false) { String msg = Messages.getMessage(Messages.REST_INVALID_FLUSH_PARAMS_UNEXPECTED, paramName); throw new IllegalArgumentException(msg); } } - private boolean isValidTimeRange(TimeRange timeRange) { + private static boolean isValidTimeRange(TimeRange timeRange) { return timeRange.getStart().isEmpty() == false || (timeRange.getStart().isEmpty() && timeRange.getEnd().isEmpty()); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java index a344b2cd41eb0..26378a60db7c6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/TimeRange.java @@ -80,7 +80,7 @@ public TimeRange build() { return createTimeRange(start, end); } - private TimeRange createTimeRange(String start, String end) { + private static TimeRange createTimeRange(String start, String end) { Long epochStart = null; Long epochEnd = null; if (start.isEmpty() == false) { @@ -104,7 +104,7 @@ private TimeRange createTimeRange(String start, String end) { /** * Returns epoch milli seconds */ - private long paramToEpochIfValidOrThrow(String paramName, String date) { + private static long paramToEpochIfValidOrThrow(String paramName, String date) { if (NOW.equals(date)) { return System.currentTimeMillis(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java index 4b2fb3deacc6d..63d44e9b9608a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java @@ -266,7 +266,7 @@ final Collection inputFields() { /** * Find the indexes of the input fields from the header */ - protected final Map inputFieldIndexes(String[] header, Collection inputFields) { + protected static Map inputFieldIndexes(String[] header, Collection inputFields) { List headerList = Arrays.asList(header); // TODO header could be empty Map fieldIndexes = new HashMap<>(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java index 6bfc53087feac..a93e50f5110e5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/JsonDataToProcessWriter.java @@ -101,7 +101,7 @@ private void writeSmileXContent(CategorizationAnalyzer categorizationAnalyzer, I } } - private byte[] findNextObject(byte marker, InputStream data) throws IOException { + private static byte[] findNextObject(byte 
marker, InputStream data) throws IOException { // The underlying stream, MarkSupportingStreamInputWrapper, doesn't care about // readlimit, so just set to -1. We could pick a value, but I worry that if the // underlying implementation changes it may cause strange behavior, whereas -1 should diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index 388e971f1e063..10a12273baff5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -183,7 +183,7 @@ private List findForecastsToDelete(SearchResponse searchResponse) return forecastsToDelete; } - private DeleteByQueryRequest buildDeleteByQuery(List ids) { + private static DeleteByQueryRequest buildDeleteByQuery(List ids) { DeleteByQueryRequest request = new DeleteByQueryRequest(); request.setSlices(AbstractBulkByScrollRequest.AUTO_SLICES); request.setTimeout(DEFAULT_MAX_DURATION); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index 0e5ed9bbdd572..537297d130789 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -120,7 +120,7 @@ public void onFailure(Exception e) { }); } - private DeleteByQueryRequest createDBQRequest(Job job, float requestsPerSec, long cutoffEpochMs) { + private static DeleteByQueryRequest createDBQRequest(Job job, float requestsPerSec, long cutoffEpochMs) { QueryBuilder excludeFilter = QueryBuilders.termsQuery( Result.RESULT_TYPE.getPreferredName(), ModelSizeStats.RESULT_TYPE_VALUE, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelper.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelper.java index a8a1215625c7d..6a5e328d7530a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelper.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NamedPipeHelper.java @@ -182,7 +182,7 @@ public OutputStream openNamedPipeOutputStream(Path file, Duration timeout) throw * @return A stream opened to read from the named pipe. * @throws IOException if the named pipe cannot be opened. */ - private OutputStream openNamedPipeOutputStreamWindows(Path file, Duration timeout) throws IOException { + private static OutputStream openNamedPipeOutputStreamWindows(Path file, Duration timeout) throws IOException { long timeoutMillisRemaining = timeout.toMillis(); // Can't use File.isFile() on Windows, but luckily there's an even simpler check (that's not possible on *nix) @@ -225,7 +225,7 @@ private OutputStream openNamedPipeOutputStreamWindows(Path file, Duration timeou * @return A stream opened to read from the named pipe. * @throws IOException if the named pipe cannot be opened. 
*/ - private OutputStream openNamedPipeOutputStreamUnix(Path file, Duration timeout) throws IOException { + private static OutputStream openNamedPipeOutputStreamUnix(Path file, Duration timeout) throws IOException { long timeoutMillisRemaining = timeout.toMillis(); // Periodically check whether the file exists until the timeout expires, then, if @@ -263,7 +263,7 @@ private OutputStream openNamedPipeOutputStreamUnix(Path file, Duration timeout) * it wrapped in a RuntimeException. However, the privileged calls could also possibly throw other * RuntimeExceptions, so this method accounts for this case too. */ - private void propagatePrivilegedException(RuntimeException e) throws IOException { + private static void propagatePrivilegedException(RuntimeException e) throws IOException { Throwable ioe = ExceptionsHelper.unwrap(e, IOException.class); if (ioe != null) { throw (IOException) ioe; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/WrappedBatchedJobsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/WrappedBatchedJobsIterator.java index f96acaa024562..0b25e1c49a815 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/WrappedBatchedJobsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/WrappedBatchedJobsIterator.java @@ -55,7 +55,7 @@ public Job next() { return currentBatch.hasNext() ? currentBatch.next() : null; } - private VolatileCursorIterator createBatchIteratorFromBatch(Deque builders) { + private static VolatileCursorIterator createBatchIteratorFromBatch(Deque builders) { List jobs = builders.stream().map(Job.Builder::build).collect(Collectors.toList()); return new VolatileCursorIterator<>(jobs); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java index 8f276829e242b..71a2abd28259a 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/action/TransportMonitoringMigrateAlertsAction.java @@ -177,7 +177,7 @@ private void afterSettingUpdate( * @param remaining The counter used to determine if any other operations are in flight * @param results A thread-safe collection to hold results */ - private ActionListener resultCollector( + private static ActionListener resultCollector( final Exporter.Config exporterConfig, final ActionListener listener, final AtomicInteger remaining, @@ -224,7 +224,7 @@ private void finalResult() { } } - private Exception compileReason(ExporterResourceStatus status) { + private static Exception compileReason(ExporterResourceStatus status) { // The reason for unsuccessful setup could be multiple exceptions: one or more watches // may fail to be removed for any reason. 
List exceptions = status.getExceptions(); @@ -247,7 +247,7 @@ private Exception compileReason(ExporterResourceStatus status) { * @param exporter The exporter to migrate * @param listener Notified of success or failure */ - private void deleteAlertsFromOpenExporter(Exporter exporter, ActionListener listener) { + private static void deleteAlertsFromOpenExporter(Exporter exporter, ActionListener listener) { assert exporter.isOpen(); try { exporter.removeAlerts(status -> { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java index c7b342ab53cc8..1eda86b030464 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java @@ -77,7 +77,7 @@ protected void doClose() { logger.debug("cleaning service closed"); } - private String executorName() { + private static String executorName() { return ThreadPool.Names.GENERIC; } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java index 276935e599fd2..6102fdcbf99f0 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/shards/ShardsCollector.java @@ -98,7 +98,7 @@ protected Collection doCollect(final MonitoringDoc.Node node, fin return Collections.unmodifiableCollection(results); } - private String[] expandIndexPattern(String[] indicesToMonitor, String[] indices) { + private static String[] expandIndexPattern(String[] indicesToMonitor, String[] indices) { final Set expandedIndices = new HashSet<>(); for (String indexOrPattern : indicesToMonitor) { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java index 9e0067c568979..8c8d76897fd7f 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java @@ -282,7 +282,7 @@ public void export(final Collection docs, final ActionListener docs, final ActionListener listener) { + private static void doExport(final ExportBulk bulk, final Collection docs, final ActionListener listener) { final AtomicReference exceptionRef = new AtomicReference<>(); try { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java index 9255b68881606..d37f4669484a0 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java @@ -531,10 +531,10 @@ protected boolean alwaysReplaceResource(final Response response) { } private void addDefaultParameters(final Request request) { - this.addParameters(request, 
defaultParameters); + PublishableHttpResource.addParameters(request, defaultParameters); } - private void addParameters(final Request request, final Map parameters) { + private static void addParameters(final Request request, final Map parameters) { for (final Map.Entry param : parameters.entrySet()) { request.addParameter(param.getKey(), param.getValue()); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java index d43225d595124..c68b4da8a049e 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/WatcherExistsHttpResource.java @@ -132,7 +132,7 @@ private void checkXPackForWatcher(final RestClient client, final ActionListener< * @throws IOException if any issue occurs while parsing the {@code xContent} {@code response}. * @throws RuntimeException if the response format is changed. */ - private boolean canUseWatcher(final Response response, final XContent xContent) throws IOException { + private static boolean canUseWatcher(final Response response, final XContent xContent) throws IOException { // no named content used; so EMPTY is fine final Map xpackInfo = XContentHelper.convertToMap(xContent, response.getEntity().getContent(), false); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java index f753de62ba157..467378f4cd738 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java @@ -468,7 +468,7 @@ private void responseReceived( } } - private boolean hasTemplate(final ClusterState clusterState, final String templateName) { + private static boolean hasTemplate(final ClusterState clusterState, final String templateName) { final IndexTemplateMetadata template = clusterState.getMetadata().getTemplates().get(templateName); return template != null && hasValidVersion(template.getVersion(), MonitoringTemplateRegistry.REGISTRY_VERSION); @@ -481,7 +481,7 @@ private boolean hasTemplate(final ClusterState clusterState, final String templa * @param minimumVersion The minimum version required to be a "valid" version * @return {@code true} if the version exists and it's >= to the minimum version. {@code false} otherwise. 
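hasValidVersion, shown next, guards a version value that is stored as a plain Object and may be null or non-numeric; restated standalone, with illustrative results:

    // Mirrors the guard in the hunk below; names are illustrative.
    static boolean hasValidVersion(Object version, long minimumVersion) {
        return version instanceof Number && ((Number) version).intValue() >= minimumVersion;
    }

    // null and non-numeric values fail safely:
    // hasValidVersion(null, 7) == false
    // hasValidVersion("7", 7)  == false
    // hasValidVersion(8, 7)    == true
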
*/ - private boolean hasValidVersion(final Object version, final long minimumVersion) { + private static boolean hasValidVersion(final Object version, final long minimumVersion) { return version instanceof Number && ((Number) version).intValue() >= minimumVersion; } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java index e947bb58e1b5c..343353a9b3a39 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java @@ -114,7 +114,7 @@ public boolean supportsContentStream() { * @param version the system API version * @return true if supported, false otherwise */ - private boolean isSupportedSystemVersion(final MonitoredSystem system, final String version) { + private static boolean isSupportedSystemVersion(final MonitoredSystem system, final String version) { final List monitoredSystem = SUPPORTED_API_VERSIONS.getOrDefault(system, emptyList()); return monitoredSystem.contains(version); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java index 72fed7376bde5..c25733bf8587c 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java @@ -156,7 +156,7 @@ public Iterator toXContentChunked(ToXContent.Params params ); } - private Iterator optional( + private static Iterator optional( String name, Map values, BiFunction, Iterator> supplier diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/AnalyzerRules.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/AnalyzerRules.java index 0c5bc8b0a97fe..1c5e91f2a76e1 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/AnalyzerRules.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/AnalyzerRules.java @@ -63,7 +63,7 @@ protected LogicalPlan rule(Filter filter) { return filter; } - private Expression replaceRawBoolFieldWithEquals(Expression e) { + private static Expression replaceRawBoolFieldWithEquals(Expression e) { if (e instanceof FieldAttribute && e.dataType() == BOOLEAN) { e = new Equals(e.source(), e, Literal.of(e, Boolean.TRUE)); } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/PreAnalyzer.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/PreAnalyzer.java index 604ea0ad08a32..e36eacc8c61c4 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/PreAnalyzer.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/analyzer/PreAnalyzer.java @@ -38,7 +38,7 @@ public PreAnalysis preAnalyze(LogicalPlan plan) { return doPreAnalyze(plan); } - protected PreAnalysis doPreAnalyze(LogicalPlan plan) { + private static PreAnalysis doPreAnalyze(LogicalPlan plan) { List indices = new ArrayList<>(); plan.forEachUp(UnresolvedRelation.class, p -> indices.add(new TableInfo(p.table(), p.frozen()))); diff --git 
a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java index 2e0caf0ff1aba..5e0aa654392e7 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java @@ -207,7 +207,7 @@ protected Object unwrapFieldsMultiValue(Object values) { return values; } - private boolean isListOfNulls(Object unwrapped) { + private static boolean isListOfNulls(Object unwrapped) { if (unwrapped instanceof List list) { if (list.size() == 0) { return false; diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/TotalHitsExtractor.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/TotalHitsExtractor.java index d5c091c51c8c6..dddf58c894c52 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/TotalHitsExtractor.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/TotalHitsExtractor.java @@ -34,7 +34,7 @@ public Object extract(SearchHit hit) { return validate(super.extract(hit)); } - private Object validate(Object value) { + private static Object validate(Object value) { if (Number.class.isInstance(value) == false) { throw new QlIllegalArgumentException( "Inconsistent total hits count handling, expected a numeric value but found a {}: {}", diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index 1fae7173b0e7b..aa90eeba43912 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -295,7 +295,7 @@ private void resolveRemoteIndices( } } - private void filterResults(String javaRegex, Set indexInfos, ActionListener> listener) { + private static void filterResults(String javaRegex, Set indexInfos, ActionListener> listener) { // since the index name does not support ?, filter the results manually Pattern pattern = javaRegex != null ? 
Pattern.compile(javaRegex) : null; @@ -610,7 +610,7 @@ public void resolveAsSeparateMappings( } - private GetAliasesRequest createGetAliasesRequest(FieldCapabilitiesResponse response, boolean includeFrozen) { + private static GetAliasesRequest createGetAliasesRequest(FieldCapabilitiesResponse response, boolean includeFrozen) { return new GetAliasesRequest().local(true) .aliases("*") .indices(response.getIndices()) diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java index 76cf1b6690335..5d9736726b46f 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java @@ -144,7 +144,7 @@ public Expression rule(ScalarFunction e) { return e; } - private Expression simplifyAndOr(BinaryPredicate bc) { + private static Expression simplifyAndOr(BinaryPredicate bc) { Expression l = bc.left(); Expression r = bc.right(); @@ -339,7 +339,7 @@ public Expression rule(BinaryLogic e) { } // combine conjunction - private Expression propagate(And and) { + private static Expression propagate(And and) { List ranges = new ArrayList<>(); // Only equalities, not-equalities and inequalities with a foldable .right are extracted separately; // the others go into the general 'exps'. @@ -479,7 +479,7 @@ private Expression propagate(And and) { // a = 2 OR a < 3 -> a < 3; a = 2 OR a < 1 -> nop // a = 2 OR 3 < a < 5 -> nop; a = 2 OR 1 < a < 3 -> 1 < a < 3; a = 2 OR 0 < a < 1 -> nop // a = 2 OR a != 2 -> TRUE; a = 2 OR a = 5 -> nop; a = 2 OR a != 5 -> a != 5 - private Expression propagate(Or or) { + private static Expression propagate(Or or) { List exps = new ArrayList<>(); List equals = new ArrayList<>(); // foldable right term Equals List notEquals = new ArrayList<>(); // foldable right term NotEquals @@ -652,7 +652,7 @@ public Expression rule(BinaryLogic e) { } // combine conjunction - private Expression combine(And and) { + private static Expression combine(And and) { List ranges = new ArrayList<>(); List bcs = new ArrayList<>(); List exps = new ArrayList<>(); @@ -764,7 +764,7 @@ else if ((other instanceof GreaterThan || other instanceof GreaterThanOrEqual) } // combine disjunction - private Expression combine(Or or) { + private static Expression combine(Or or) { List bcs = new ArrayList<>(); List ranges = new ArrayList<>(); List exps = new ArrayList<>(); @@ -912,7 +912,7 @@ private static boolean findExistingRange(Range main, List ranges, boolean return false; } - private boolean findConjunctiveComparisonInRange(BinaryComparison main, List ranges) { + private static boolean findConjunctiveComparisonInRange(BinaryComparison main, List ranges) { Object value = main.right().fold(); // NB: the loop modifies the list (hence why the int is used) @@ -1789,11 +1789,11 @@ public static final class SetAsOptimized extends Rule @Override public LogicalPlan apply(LogicalPlan plan) { - plan.forEachUp(this::rule); + plan.forEachUp(SetAsOptimized::rule); return plan; } - private void rule(LogicalPlan plan) { + private static void rule(LogicalPlan plan) { if (plan.optimized() == false) { plan.setOptimized(); } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plan/QueryPlan.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plan/QueryPlan.java index a2802e0463816..fa38e87612d5d 100644 --- 
a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plan/QueryPlan.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/plan/QueryPlan.java @@ -108,7 +108,7 @@ public PlanType transformExpressionsUp(Class typeToken } @SuppressWarnings("unchecked") - private Object doTransformExpression(Object arg, Function traversal) { + private static Object doTransformExpression(Object arg, Function traversal) { if (arg instanceof Expression) { return traversal.apply((Expression) arg); } @@ -163,7 +163,7 @@ public void forEachExpressionUp(Class typeToken, Consu } @SuppressWarnings("unchecked") - private void doForEachExpression(Object arg, Consumer traversal) { + private static void doForEachExpression(Object arg, Consumer traversal) { if (arg instanceof Expression) { traversal.accept((Expression) arg); } else if (arg instanceof Collection c) { diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/tree/Node.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/tree/Node.java index 1d9132d041670..cb0233429f323 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/tree/Node.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/tree/Node.java @@ -407,13 +407,13 @@ public String propertiesToString(boolean skipIfChild) { return sb.toString(); } - private String toString(Object obj) { + private static String toString(Object obj) { StringBuilder sb = new StringBuilder(); toString(sb, obj); return sb.toString(); } - private void toString(StringBuilder sb, Object obj) { + private static void toString(StringBuilder sb, Object obj) { if (obj instanceof Iterable) { sb.append("["); for (Iterator it = ((Iterable) obj).iterator(); it.hasNext();) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 0cb65ae82baf0..57bdebafd1b20 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -151,7 +151,7 @@ protected IterationResult> doProcess(SearchResponse searchRe * @param config The config for the job. 
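A recurring detail in these hunks (for example `plan.forEachUp(SetAsOptimized::rule)` in OptimizerRules just above) is that once a method is static, a bound `this::` method reference can become an unbound class reference. A small sketch of the distinction, with hypothetical names:

    import java.util.List;
    import java.util.function.Consumer;

    final class Demo {
        private static void rule(String plan) {
            System.out.println("visit " + plan);
        }

        void apply(List<String> plans) {
            // this::rule would capture the enclosing instance; Demo::rule cannot,
            // which is only legal because rule() is static.
            Consumer<String> visitor = Demo::rule;
            plans.forEach(visitor);
        }
    }
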
* @return The composite aggregation that creates the rollup buckets */ - private CompositeAggregationBuilder createCompositeBuilder(RollupJobConfig config) { + private static CompositeAggregationBuilder createCompositeBuilder(RollupJobConfig config) { final GroupConfig groupConfig = config.getGroupConfig(); List> builders = createValueSourceBuilders(groupConfig); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java index f1d58f2f0ca17..f95b44f1d2e22 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java @@ -330,7 +330,7 @@ private AllocateUnassignedDecision decideAllocation(RoutingAllocation allocation return AllocateUnassignedDecision.NOT_TAKEN; } - private boolean isDelayedDueToNodeRestart(RoutingAllocation allocation, ShardRouting shardRouting) { + private static boolean isDelayedDueToNodeRestart(RoutingAllocation allocation, ShardRouting shardRouting) { if (shardRouting.unassignedInfo().isDelayed()) { String lastAllocatedNodeId = shardRouting.unassignedInfo().getLastAllocatedNodeId(); if (lastAllocatedNodeId != null) { diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java index 46ecfb0c33ea7..29828fba085d8 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java @@ -844,7 +844,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce } } - private String initialMasterNodesSettingValue(Environment environment) { + private static String initialMasterNodesSettingValue(Environment environment) { if (NODE_NAME_SETTING.exists(environment.settings())) { return "[\"" + NODE_NAME_SETTING.get(environment.settings()) + "\"]"; } @@ -932,8 +932,13 @@ private Environment possiblyReconfigureNode(Environment env, Terminal terminal, } } - private void notifyOfFailure(boolean inEnrollmentMode, Terminal terminal, Terminal.Verbosity verbosity, int exitCode, String message) - throws UserException { + private static void notifyOfFailure( + boolean inEnrollmentMode, + Terminal terminal, + Terminal.Verbosity verbosity, + int exitCode, + String message + ) throws UserException { if (inEnrollmentMode) { throw new UserException(exitCode, message); } else { @@ -942,11 +947,11 @@ private void notifyOfFailure(boolean inEnrollmentMode, Terminal terminal, Termin } } - private void deleteDirectory(Path directory) throws IOException { + private static void deleteDirectory(Path directory) throws IOException { IOUtils.rm(directory); } - private void moveDirectory(Path srcDir, Path dstDir) throws IOException { + private static void moveDirectory(Path srcDir, Path dstDir) throws IOException { try { Files.move(srcDir, dstDir, StandardCopyOption.ATOMIC_MOVE); } catch (AtomicMoveNotSupportedException e) { @@ -954,7 +959,7 @@ private void moveDirectory(Path srcDir, Path dstDir) throws IOException { } } - private 
GeneralNames getSubjectAltNames(Settings settings) throws IOException { + private static GeneralNames getSubjectAltNames(Settings settings) throws IOException { Set generalNameSet = new HashSet<>(); for (InetAddress ip : NetworkUtils.getAllAddresses()) { String ipString = NetworkAddress.format(ip); @@ -1079,7 +1084,7 @@ void checkExistingConfiguration(Settings settings, boolean inEnrollmentMode, Ter // Here we take a conservative approach: if any of the discovery or initial master nodes setting are set to a non-empty // value, we assume the admin intended a multi-node cluster configuration. There is only one exception: if the initial master // nodes setting contains just the current node name. - private boolean isInitialClusterNode(Settings settings) { + private static boolean isInitialClusterNode(Settings settings) { return DiscoveryModule.isSingleNodeDiscovery(settings) || (ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(settings).isEmpty() && SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.get(settings).isEmpty() @@ -1148,7 +1153,7 @@ private static boolean isDirEmpty(Path path) throws IOException { } } - private X509Certificate parseCertificateFromPem(String pemFormattedCert, Terminal terminal) throws Exception { + private static X509Certificate parseCertificateFromPem(String pemFormattedCert, Terminal terminal) throws Exception { try { final List certs = CertParsingUtils.readCertificates( Base64.getDecoder().wrap(new ByteArrayInputStream(pemFormattedCert.getBytes(StandardCharsets.UTF_8))) @@ -1174,7 +1179,7 @@ private X509Certificate parseCertificateFromPem(String pemFormattedCert, Termina } } - private PrivateKey parseKeyFromPem(String pemFormattedKey, Terminal terminal) throws UserException { + private static PrivateKey parseKeyFromPem(String pemFormattedKey, Terminal terminal) throws UserException { try { return parsePKCS8PemString(pemFormattedKey); } catch (Exception e) { @@ -1195,7 +1200,7 @@ private PrivateKey parseKeyFromPem(String pemFormattedKey, Terminal terminal) th } @SuppressWarnings("unchecked") - private List getTransportAddresses(Map responseMap) { + private static List getTransportAddresses(Map responseMap) { return (List) responseMap.get("nodes_addresses"); } @@ -1254,7 +1259,7 @@ static List removePreviousAutoconfiguration(List existingConfigL return existingConfigLines; } - private void removeAutoConfigurationFromKeystore(Environment env, Terminal terminal) throws UserException { + private static void removeAutoConfigurationFromKeystore(Environment env, Terminal terminal) throws UserException { if (Files.exists(KeyStoreWrapper.keystorePath(env.configFile()))) { try ( KeyStoreWrapper existingKeystore = KeyStoreWrapper.load(env.configFile()); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index 57de814a7e7ee..82a6a5fc55c13 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -852,7 +852,7 @@ void generateAndWriteSignedCertificates( } } - private CertificateAndKey generateCertificateAndKey( + private static CertificateAndKey generateCertificateAndKey( CertificateInformation certificateInformation, CAInfo caInfo, int keySize, diff --git 
a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java index 14af042e98e68..b67bb9898991f 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java @@ -312,7 +312,7 @@ private void writeZip(Path file, char[] password, CertificateTool.CAInfo caInfo, } } - private void createZipDirectory(ZipOutputStream zip, String name) throws IOException { + private static void createZipDirectory(ZipOutputStream zip, String name) throws IOException { ZipEntry entry = new ZipEntry(name + "/"); assert entry.isDirectory(); zip.putNextEntry(entry); @@ -502,7 +502,7 @@ static void copyWithSubstitutions(InputStream stream, PrintWriter writer, Map buildSubstitutions(Environment env, Map entries) { + private static Map buildSubstitutions(Environment env, Map entries) { final Map map = Maps.newMapWithExpectedSize(entries.size() + 4); ZonedDateTime now = ZonedDateTime.now().withNano(0); map.put("DATE", now.format(DateTimeFormatter.ISO_LOCAL_DATE)); @@ -542,14 +542,14 @@ private void writePemEntry(ZipOutputStream zip, String name, PemObjectGenerator } } - private JcaMiscPEMGenerator generator(PrivateKey privateKey, char[] password) throws IOException { + private static JcaMiscPEMGenerator generator(PrivateKey privateKey, char[] password) throws IOException { if (password == null || password.length == 0) { return new JcaMiscPEMGenerator(privateKey); } return new JcaMiscPEMGenerator(privateKey, CertificateTool.getEncrypter(password)); } - private Period getCertificateValidityPeriod(Terminal terminal) { + private static Period getCertificateValidityPeriod(Terminal terminal) { printHeader("How long should your certificates be valid?", terminal); terminal.println("Every certificate has an expiry date. When the expiry date is reached clients"); terminal.println("will stop trusting your certificate and TLS connections will fail."); @@ -566,7 +566,7 @@ private Period getCertificateValidityPeriod(Terminal terminal) { return readPeriodInput(terminal, "For how long should your certificate be valid?", DEFAULT_CERT_VALIDITY, 60); } - private boolean askMultipleCertificates(Terminal terminal) { + private static boolean askMultipleCertificates(Terminal terminal) { printHeader("Do you wish to generate one certificate per node?", terminal); terminal.println("If you have multiple nodes in your cluster, then you may choose to generate a"); terminal.println("separate certificate for each of these nodes. 
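createZipDirectory, changed in the HttpCertificateCommand hunks below, relies on a java.util.zip convention: an entry whose name ends in '/' is a directory, which is what its assert verifies. A self-contained sketch of that convention (the closeEntry call is an assumption for completeness):

    import java.io.IOException;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipOutputStream;

    // ZipEntry.isDirectory() is true exactly when the entry name ends with '/'.
    static void createZipDirectory(ZipOutputStream zip, String name) throws IOException {
        ZipEntry entry = new ZipEntry(name + "/");
        assert entry.isDirectory();
        zip.putNextEntry(entry);
        zip.closeEntry();
    }
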
Each certificate will have its"); @@ -627,7 +627,7 @@ private CertOptions getCertificateConfiguration( terminal.println("When you are done, press once more to move on to the next step."); terminal.println(""); - dnsNames.addAll(readMultiLineInput(terminal, this::validateHostname)); + dnsNames.addAll(readMultiLineInput(terminal, HttpCertificateCommand::validateHostname)); if (dnsNames.isEmpty()) { terminal.println(Terminal.Verbosity.SILENT, "You did not enter any hostnames."); terminal.println("Clients are likely to encounter TLS hostname verification errors if they"); @@ -660,7 +660,7 @@ private CertOptions getCertificateConfiguration( terminal.println("When you are done, press once more to move on to the next step."); terminal.println(""); - ipNames.addAll(readMultiLineInput(terminal, this::validateIpAddress)); + ipNames.addAll(readMultiLineInput(terminal, HttpCertificateCommand::validateIpAddress)); if (ipNames.isEmpty()) { terminal.println(Terminal.Verbosity.SILENT, "You did not enter any IP addresses."); } else { @@ -741,7 +741,7 @@ private CertOptions getCertificateConfiguration( return new CertOptions(certName, dn, dnsNames, ipNames, keySize, validity); } - private String validateHostname(String name) { + private static String validateHostname(String name) { if (DERIA5String.isIA5String(name)) { return null; } else { @@ -749,7 +749,7 @@ private String validateHostname(String name) { } } - private String validateIpAddress(String ip) { + private static String validateIpAddress(String ip) { if (InetAddresses.isInetAddress(ip)) { return null; } else { @@ -757,11 +757,11 @@ private String validateIpAddress(String ip) { } } - private X500Principal buildDistinguishedName(String name) { + private static X500Principal buildDistinguishedName(String name) { return new X500Principal("CN=" + name.replace(".", ",DC=")); } - private List readMultiLineInput(Terminal terminal, Function validator) { + private static List readMultiLineInput(Terminal terminal, Function validator) { final List lines = new ArrayList<>(); while (true) { String input = terminal.readText(""); @@ -779,7 +779,7 @@ private List readMultiLineInput(Terminal terminal, Function { String periodInput = input.replaceAll("[,\\s]", ""); if (input.charAt(0) != 'P') { @@ -955,7 +955,7 @@ Period readPeriodInput(Terminal terminal, String prompt, Period defaultValue, in return period; } - private Integer readKeySize(Terminal terminal, int keySize) { + private static Integer readKeySize(Terminal terminal, int keySize) { return tryReadInput(terminal, "Key Size", keySize, input -> { try { final int size = Integer.parseInt(input); @@ -979,7 +979,7 @@ private Integer readKeySize(Terminal terminal, int keySize) { }); } - private char[] readPassword(Terminal terminal, String prompt, boolean confirm) { + private static char[] readPassword(Terminal terminal, String prompt, boolean confirm) { while (true) { final char[] password = terminal.readSecret(prompt + " [ for none]"); if (password.length == 0) { @@ -1003,7 +1003,7 @@ private char[] readPassword(Terminal terminal, String prompt, boolean confirm) { } } - private CertificateTool.CAInfo readKeystoreCA(Path ksPath, FileType fileType, Terminal terminal) throws UserException { + private static CertificateTool.CAInfo readKeystoreCA(Path ksPath, FileType fileType, Terminal terminal) throws UserException { final String storeType = fileType == FileType.PKCS12 ? 
"PKCS12" : "jks"; terminal.println("Reading a " + storeType + " keystore requires a password."); terminal.println("It is possible for the keystore's password to be blank,"); @@ -1029,13 +1029,13 @@ private CertificateTool.CAInfo readKeystoreCA(Path ksPath, FileType fileType, Te } } - private CertificateTool.CAInfo readPemCA(Path certPath, Path keyPath, Terminal terminal) throws UserException { + private static CertificateTool.CAInfo readPemCA(Path certPath, Path keyPath, Terminal terminal) throws UserException { final X509Certificate cert = readCertificate(certPath, terminal); final PrivateKey key = readPrivateKey(keyPath, terminal); return new CertificateTool.CAInfo(cert, key); } - private X509Certificate readCertificate(Path path, Terminal terminal) throws UserException { + private static X509Certificate readCertificate(Path path, Terminal terminal) throws UserException { try { final X509Certificate[] certificates = CertParsingUtils.readX509Certificates(List.of(path)); switch (certificates.length) { @@ -1053,7 +1053,7 @@ private X509Certificate readCertificate(Path path, Terminal terminal) throws Use } } - private PrivateKey readPrivateKey(Path path, Terminal terminal) { + private static PrivateKey readPrivateKey(Path path, Terminal terminal) { try { return PemUtils.readPrivateKey(path, () -> { terminal.println(""); @@ -1066,7 +1066,7 @@ private PrivateKey readPrivateKey(Path path, Terminal terminal) { } } - private boolean askExistingCertificateAuthority(Terminal terminal) { + private static boolean askExistingCertificateAuthority(Terminal terminal) { printHeader("Do you have an existing Certificate Authority (CA) key-pair that you wish to use to sign your certificate?", terminal); terminal.println("If you have an existing CA certificate and key, then you can use that CA to"); terminal.println("sign your new http certificate. This allows you to use the same CA across"); @@ -1079,7 +1079,7 @@ private boolean askExistingCertificateAuthority(Terminal terminal) { return terminal.promptYesNo("Use an existing CA?", false); } - private T tryReadInput(Terminal terminal, String prompt, T defaultValue, Function parser) { + private static T tryReadInput(Terminal terminal, String prompt, T defaultValue, Function parser) { final String defaultStr = defaultValue instanceof Period ? 
toString((Period) defaultValue) : String.valueOf(defaultValue); while (true) { final String input = terminal.readText(prompt + " [" + defaultStr + "] "); @@ -1113,7 +1113,7 @@ static String toString(Period period) { return Strings.collectionToCommaDelimitedString(parts); } - private Path requestPath(String prompt, Terminal terminal, Environment env, boolean requireExisting) { + private static Path requestPath(String prompt, Terminal terminal, Environment env, boolean requireExisting) { for (;;) { final String input = terminal.readText(prompt); final Path path = env.configFile().resolve(input).toAbsolutePath(); @@ -1209,7 +1209,7 @@ static FileType guessFileType(Path path, Terminal terminal) { return FileType.UNRECOGNIZED; } - private void printHeader(String text, Terminal terminal) { + private static void printHeader(String text, Terminal terminal) { terminal.println(""); terminal.println(Terminal.Verbosity.SILENT, "## " + text); terminal.println(""); diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommandTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommandTests.java index 901e52cfe89a9..1033d4e51ebba 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommandTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommandTests.java @@ -516,56 +516,55 @@ public void testGenerateMultipleCertificateWithNewCA() throws Exception { } public void testParsingValidityPeriod() throws Exception { - final HttpCertificateCommand command = new HttpCertificateCommand(); final MockTerminal terminal = MockTerminal.create(); terminal.addTextInput("2y"); - assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(2))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(2))); terminal.addTextInput("18m"); - assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofMonths(18))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 1), is(Period.ofMonths(18))); terminal.addTextInput("90d"); - assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofDays(90))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 1), is(Period.ofDays(90))); terminal.addTextInput("1y, 6m"); - assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(1).withMonths(6))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(1).withMonths(6))); // Test: Re-prompt on bad input. 
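The tests around this point exercise readPeriodInput, which accepts strings such as "2y", "18m", "90d" and combinations like "1y, 6m". A rough sketch of how such input maps onto java.time.Period; this is an illustration of the accepted format, not the command's actual parser:

    import java.time.Period;

    // Hypothetical re-implementation for illustration only.
    static Period parseValidity(String input) {
        Period result = Period.ZERO;
        for (String token : input.replaceAll("[,\\s]", " ").trim().split("\\s+")) {
            int value = Integer.parseInt(token.substring(0, token.length() - 1));
            switch (token.charAt(token.length() - 1)) {
                case 'y' -> result = result.plusYears(value);
                case 'm' -> result = result.plusMonths(value);
                case 'd' -> result = result.plusDays(value);
                default -> throw new IllegalArgumentException("bad unit in: " + token);
            }
        }
        return result;
    }

    // parseValidity("1y, 6m") -> P1Y6M, matching Period.ofYears(1).withMonths(6)
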
terminal.addTextInput("2m & 4d"); terminal.addTextInput("2m 4d"); - assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofMonths(2).withDays(4))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 1), is(Period.ofMonths(2).withDays(4))); terminal.addTextInput("1y, 6m"); - assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(1).withMonths(6))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 1), is(Period.ofYears(1).withMonths(6))); // Test: Accept default value final Period p = Period.of(randomIntBetween(1, 5), randomIntBetween(0, 11), randomIntBetween(0, 30)); terminal.addTextInput(""); - assertThat(command.readPeriodInput(terminal, "", p, 1), is(p)); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", p, 1), is(p)); final int y = randomIntBetween(1, 5); final int m = randomIntBetween(1, 11); final int d = randomIntBetween(1, 30); terminal.addTextInput(y + "y " + m + "m " + d + "d"); - assertThat(command.readPeriodInput(terminal, "", null, 1), is(Period.of(y, m, d))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 1), is(Period.of(y, m, d))); // Test: Minimum Days final int shortDays = randomIntBetween(1, 20); terminal.addTextInput(shortDays + "d"); terminal.addTextInput("y"); // I'm sure - assertThat(command.readPeriodInput(terminal, "", null, 21), is(Period.ofDays(shortDays))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 21), is(Period.ofDays(shortDays))); terminal.addTextInput(shortDays + "d"); terminal.addTextInput("n"); // I'm not sure terminal.addTextInput("30d"); - assertThat(command.readPeriodInput(terminal, "", null, 21), is(Period.ofDays(30))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 21), is(Period.ofDays(30))); terminal.addTextInput("2m"); terminal.addTextInput("n"); // I'm not sure terminal.addTextInput("2y"); - assertThat(command.readPeriodInput(terminal, "", null, 90), is(Period.ofYears(2))); + assertThat(HttpCertificateCommand.readPeriodInput(terminal, "", null, 90), is(Period.ofYears(2))); } public void testValidityPeriodToString() throws Exception { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 39b62da317b9b..fd38b40683f71 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -1824,7 +1824,6 @@ public UnaryOperator getRestHandlerInterceptor(ThreadContext thread threadContext, secondayAuthc.get(), auditTrailService.get(), - workflowService.get(), handler, operatorPrivilegesService.get() ); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java index 7e97a8b571679..9d367bf5caf24 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/apikey/TransportBaseUpdateApiKeyAction.java @@ -57,7 +57,8 @@ public final void doExecute(Task task, Request request, ActionListener abstract void doExecuteUpdate(Task task, Request request, Authentication authentication, 
ActionListener listener); - protected UpdateApiKeyResponse toSingleResponse(final String apiKeyId, final BulkUpdateApiKeyResponse response) throws Exception { + protected static UpdateApiKeyResponse toSingleResponse(final String apiKeyId, final BulkUpdateApiKeyResponse response) + throws Exception { if (response.getTotalResultCount() != 1) { throw new IllegalStateException( "single result required for single API key update but result count was [" + response.getTotalResultCount() + "]" @@ -84,7 +85,7 @@ protected UpdateApiKeyResponse toSingleResponse(final String apiKeyId, final Bul } } - private void throwIllegalStateExceptionOnIdMismatch(final String requestId, final String responseId) { + private static void throwIllegalStateExceptionOnIdMismatch(final String requestId, final String responseId) { final String message = "response ID [" + responseId + "] does not match request ID [" + requestId + "] for single API key update"; assert false : message; throw new IllegalStateException(message); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java index b7df9428df0ee..3d63364f85664 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/role/TransportGetRolesAction.java @@ -52,7 +52,7 @@ protected void doExecute(Task task, final GetRolesRequest request, final ActionL if (specificRolesRequested) { for (String role : requestedRoles) { if (ReservedRolesStore.isReserved(role)) { - RoleDescriptor rd = reservedRolesStore.roleDescriptor(role); + RoleDescriptor rd = ReservedRolesStore.roleDescriptor(role); if (rd != null) { roles.add(rd); } else { @@ -64,7 +64,7 @@ protected void doExecute(Task task, final GetRolesRequest request, final ActionL } } } else { - roles.addAll(reservedRolesStore.roleDescriptors()); + roles.addAll(ReservedRolesStore.roleDescriptors()); } if (specificRolesRequested && rolesToSearchFor.isEmpty()) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java index 2328c8478debc..852887767578f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java @@ -58,7 +58,7 @@ public String name() { return NAME; } - private Collection prepare(List roleMappings) { + private static Collection prepare(List roleMappings) { List requests = roleMappings.stream().map(rm -> PutRoleMappingRequest.fromMapping(rm)).toList(); var exceptions = new ArrayList(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlCompleteLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlCompleteLogoutAction.java index a1b4b1bd92aa1..0b76af3cf542e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlCompleteLogoutAction.java +++ 
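throwIllegalStateExceptionOnIdMismatch, above, pairs `assert false` with an unconditional throw, a common defensive pattern in this codebase: the assert trips loudly in test builds (where -ea is enabled) while the throw guarantees production, running without assertions, still fails fast. Sketched in isolation:

    // Tripwire pattern: fail hard under -ea, still fail fast without it.
    private static void throwOnIdMismatch(String requestId, String responseId) {
        final String message = "response ID [" + responseId + "] does not match request ID [" + requestId + "]";
        assert false : message;
        throw new IllegalStateException(message);
    }
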
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlCompleteLogoutAction.java @@ -56,7 +56,7 @@ protected void doExecute(Task task, SamlCompleteLogoutRequest request, ActionLis } } - private void processLogoutResponse( + private static void processLogoutResponse( SamlRealm samlRealm, SamlCompleteLogoutRequest request, ActionListener listener diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java index 22c3989199b5d..72d4c7f1d3a5e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionAction.java @@ -102,7 +102,7 @@ private void invalidateSession( } } - private String buildLogoutResponseUrl(SamlRealm realm, SamlLogoutRequestHandler.Result result) { + private static String buildLogoutResponseUrl(SamlRealm realm, SamlLogoutRequestHandler.Result result) { final LogoutResponse response = realm.buildLogoutResponse(result.getRequestId()); return new SamlRedirect(response, realm.getSigningConfiguration()).getRedirectUrl(result.getRelayState()); } @@ -136,7 +136,7 @@ private void findAndInvalidateTokens(SamlRealm realm, SamlLogoutRequestHandler.R ); } - private Predicate> containsMetadata(Map requiredMetadata) { + private static Predicate> containsMetadata(Map requiredMetadata) { return source -> { @SuppressWarnings("unchecked") Map actualMetadata = (Map) source.get("metadata"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java index 4c6a88586f8f6..d5f25dcf5773e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java @@ -120,7 +120,7 @@ private SamlLogoutResponse buildResponse(Authentication authentication, Map metadata, String key) { + private static String getMetadataString(Map metadata, String key) { final Object value = metadata.get(key); if (value == null) { if (metadata.containsKey(key)) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java index 18f67eba8fc44..dcc5b638f4d43 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlPrepareAuthenticationAction.java @@ -64,7 +64,11 @@ protected void doExecute( } } - private void prepareAuthentication(SamlRealm realm, String relayState, ActionListener listener) { + private static void prepareAuthentication( + SamlRealm realm, + String relayState, + ActionListener listener + ) { final AuthnRequest authnRequest = realm.buildAuthenticationRequest(); try { String redirectUrl = new SamlRedirect(authnRequest, 
realm.getSigningConfiguration()).getRedirectUrl(relayState); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 1b4f4b891c7ea..b28680f35e083 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -1275,7 +1275,8 @@ LogEntryBuilder withRequestBody(final BulkUpdateApiKeyRequest bulkUpdateApiKeyRe return this; } - private void withRequestBody(XContentBuilder builder, AbstractCreateApiKeyRequest abstractCreateApiKeyRequest) throws IOException { + private static void withRequestBody(XContentBuilder builder, AbstractCreateApiKeyRequest abstractCreateApiKeyRequest) + throws IOException { TimeValue expiration = abstractCreateApiKeyRequest.getExpiration(); builder.startObject("apikey") .field("id", abstractCreateApiKeyRequest.getId()) @@ -1293,21 +1294,23 @@ private void withRequestBody(XContentBuilder builder, AbstractCreateApiKeyReques builder.endObject(); // apikey } - private void withRequestBody(final XContentBuilder builder, final BaseSingleUpdateApiKeyRequest baseSingleUpdateApiKeyRequest) - throws IOException { + private static void withRequestBody( + final XContentBuilder builder, + final BaseSingleUpdateApiKeyRequest baseSingleUpdateApiKeyRequest + ) throws IOException { builder.startObject("apikey").field("id", baseSingleUpdateApiKeyRequest.getId()); withBaseUpdateApiKeyFields(builder, baseSingleUpdateApiKeyRequest); builder.endObject(); } - private void withRequestBody(final XContentBuilder builder, final BulkUpdateApiKeyRequest bulkUpdateApiKeyRequest) + private static void withRequestBody(final XContentBuilder builder, final BulkUpdateApiKeyRequest bulkUpdateApiKeyRequest) throws IOException { builder.startObject("apikeys").stringListField("ids", bulkUpdateApiKeyRequest.getIds()); withBaseUpdateApiKeyFields(builder, bulkUpdateApiKeyRequest); builder.endObject(); } - private void withBaseUpdateApiKeyFields(final XContentBuilder builder, final BaseUpdateApiKeyRequest baseUpdateApiKeyRequest) + private static void withBaseUpdateApiKeyFields(final XContentBuilder builder, final BaseUpdateApiKeyRequest baseUpdateApiKeyRequest) throws IOException { builder.field("type", baseUpdateApiKeyRequest.getType().value()); if (baseUpdateApiKeyRequest.getRoleDescriptors() != null) { @@ -1324,7 +1327,7 @@ private void withBaseUpdateApiKeyFields(final XContentBuilder builder, final Bas } } - private void withRoleDescriptor(XContentBuilder builder, RoleDescriptor roleDescriptor) throws IOException { + private static void withRoleDescriptor(XContentBuilder builder, RoleDescriptor roleDescriptor) throws IOException { builder.startObject().array(RoleDescriptor.Fields.CLUSTER.getPreferredName(), roleDescriptor.getClusterPrivileges()); if (roleDescriptor.getConditionalClusterPrivileges() != null && roleDescriptor.getConditionalClusterPrivileges().length > 0) { // This fails if this list contains multiple instances of the {@code ManageApplicationPrivileges} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index ce622ddf6fa69..897bfdcc6583e 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -740,7 +740,7 @@ static XContentBuilder newDocument( * @return `null` if the update is a noop, i.e., if no changes to `currentApiKeyDoc` are required */ @Nullable - XContentBuilder maybeBuildUpdatedDocument( + static XContentBuilder maybeBuildUpdatedDocument( final String apiKeyId, final ApiKeyDoc currentApiKeyDoc, final Version targetDocVersion, @@ -799,7 +799,7 @@ XContentBuilder maybeBuildUpdatedDocument( return builder.endObject(); } - private boolean isNoop( + private static boolean isNoop( final String apiKeyId, final ApiKeyDoc apiKeyDoc, final Version targetDocVersion, @@ -999,7 +999,7 @@ public List parseRoleDescriptorsBytes( return parseRoleDescriptorsBytes(apiKeyId, bytesReference, roleType == RoleReference.ApiKeyRoleType.LIMITED_BY); } - private List parseRoleDescriptorsBytes( + private static List parseRoleDescriptorsBytes( final String apiKeyId, BytesReference bytesReference, final boolean replaceLegacySuperuserRoleDescriptor @@ -1494,7 +1494,7 @@ private IndexRequest maybeBuildIndexRequest( .request(); } - private void addErrorsForNotFoundApiKeys( + private static void addErrorsForNotFoundApiKeys( final BulkUpdateApiKeyResponse.Builder responseBuilder, final Collection foundDocs, final List requestedIds diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java index 866bac68c33dd..9b0212910f4f5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java @@ -196,7 +196,7 @@ private static void withRequestProcessingFailure( listener.onFailure(ese); } - private void writeAuthToContext( + private static void writeAuthToContext( final Authenticator.Context context, final Authentication authentication, final ActionListener listener diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 0dd9bb6f551d4..0735eccff9913 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -422,7 +422,7 @@ private void addReservedRealm(List realms) { /** * Check that the given realmName is not yet used by the given list of realms. 
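The realm-name check that follows asserts uniqueness by comparing a list's size against the size of the set built from its names. The idiom in miniature:

    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    // True when no two names collide: a Set silently drops duplicates, so any
    // size difference between list and set signals a repeated name.
    static boolean namesAreUnique(List<String> names) {
        Set<String> unique = names.stream().collect(Collectors.toUnmodifiableSet());
        return unique.size() == names.size();
    }
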
*/ - private void ensureRealmNameIsAvailable(List realms, String realmName) { + private static void ensureRealmNameIsAvailable(List realms, String realmName) { assert realms.size() == realms.stream().map(Realm::name).collect(Collectors.toUnmodifiableSet()).size() : "existing realm names must be unique"; final Realm misNamedRealm = realms.stream().filter(realm -> realmName.equals(realm.name())).findFirst().orElse(null); @@ -462,7 +462,7 @@ private static void checkUniqueOrders(Map> orderToRealmName } } - private void ensureUniqueExplicitlyConfiguredRealmNames(Map> nameToRealmIdentifier) { + private static void ensureUniqueExplicitlyConfiguredRealmNames(Map> nameToRealmIdentifier) { String duplicateRealms = nameToRealmIdentifier.entrySet() .stream() .filter(entry -> entry.getValue().size() > 1) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordTool.java index b4e766fabf045..0718742d362cb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordTool.java @@ -128,7 +128,7 @@ protected void executeCommand(Terminal terminal, OptionSet options, Environment } } - private SecureString promptForPassword(Terminal terminal, String providedUsername) { + private static SecureString promptForPassword(Terminal terminal, String providedUsername) { while (true) { SecureString password1 = new SecureString(terminal.readSecret("Enter password for [" + providedUsername + "]: ")); Validation.Error err = Validation.Users.validatePassword(password1); @@ -150,7 +150,7 @@ private SecureString promptForPassword(Terminal terminal, String providedUsernam } } - private String requestBodySupplier(SecureString pwd) throws Exception { + private static String requestBodySupplier(SecureString pwd) throws Exception { XContentBuilder xContentBuilder = JsonXContent.contentBuilder(); xContentBuilder.startObject().field("password", pwd.toString()).endObject(); return Strings.toString(xContentBuilder); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index 10ae8ac7da8c1..95f06051abb40 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -172,7 +172,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce ); } - private SecureString generatePassword(SecureRandom secureRandom, String user) { + private static SecureString generatePassword(SecureRandom secureRandom, String user) { int passwordLength = 20; // Generate 20 character passwords char[] characters = new char[passwordLength]; for (int i = 0; i < passwordLength; ++i) { @@ -181,7 +181,7 @@ private SecureString generatePassword(SecureRandom secureRandom, String user) { return new SecureString(characters); } - private void changedPasswordCallback(Terminal terminal, String user, SecureString password) { + private static void changedPasswordCallback(Terminal terminal, String user, SecureString password) { 
terminal.println("Changed password for user " + user + "\n" + "PASSWORD " + user + " = " + password + "\n"); } @@ -227,7 +227,7 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce ); } - private SecureString promptForPassword(Terminal terminal, String user) throws UserException { + private static SecureString promptForPassword(Terminal terminal, String user) throws UserException { // loop for two consecutive good passwords while (true) { SecureString password1 = new SecureString(terminal.readSecret("Enter password for [" + user + "]: ")); @@ -250,7 +250,7 @@ private SecureString promptForPassword(Terminal terminal, String user) throws Us } } - private void changedPasswordCallback(Terminal terminal, String user, SecureString password) { + private static void changedPasswordCallback(Terminal terminal, String user, SecureString password) { terminal.println("Changed password for user [" + user + "]"); } } @@ -620,7 +620,7 @@ void changePasswords( } } - private HttpResponseBuilder responseBuilder(InputStream is, Terminal terminal) throws IOException { + private static HttpResponseBuilder responseBuilder(InputStream is, Terminal terminal) throws IOException { HttpResponseBuilder httpResponseBuilder = new HttpResponseBuilder(); if (is != null) { byte[] bytes = toByteArray(is); @@ -633,12 +633,12 @@ private HttpResponseBuilder responseBuilder(InputStream is, Terminal terminal) t return httpResponseBuilder; } - private URL createURL(URL url, String path, String query) throws MalformedURLException, URISyntaxException { + private static URL createURL(URL url, String path, String query) throws MalformedURLException, URISyntaxException { return new URL(url, (url.toURI().getPath() + path).replaceAll("/+", "/") + query); } } - private byte[] toByteArray(InputStream is) throws IOException { + private static byte[] toByteArray(InputStream is) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); byte[] internalBuffer = new byte[1024]; int read = is.read(internalBuffer); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java index b51db1a138cde..273fe6c487439 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java @@ -185,7 +185,7 @@ public void authenticate(final AuthenticationToken token, final ActionListener 0) { return Base64.getEncoder().encodeToString(outToken); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java index a2bd793d48c3c..4e390c86ba1f1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapSessionFactory.java @@ -126,7 +126,7 @@ void loop() { * @param username username to insert into the DN template. Any commas, equals or plus will be escaped. * @return DN (distinguished name) build from the template. 
*/ - String buildDnFromTemplate(String username, String template) { + static String buildDnFromTemplate(String username, String template) { // this value must be escaped to avoid manipulation of the template DN. String escapedUsername = escapedRDNValue(username); return new MessageFormat(template, Locale.ROOT).format(new Object[] { escapedUsername }, new StringBuffer(), null).toString(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetadataResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetadataResolver.java index c82066a15c593..14f78ba82fc27 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetadataResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/LdapMetadataResolver.java @@ -80,7 +80,7 @@ public void resolve( } } - private Attribute findAttribute(Collection attributes, String name) { + private static Attribute findAttribute(Collection attributes, String name) { return attributes.stream().filter(attr -> attr.getName().equals(name)).findFirst().orElse(null); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java index 840d2476285b6..5d260266d3f20 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/support/SessionFactory.java @@ -276,7 +276,7 @@ public boolean ssl() { /** * @param ldapUrls URLS in the form of "ldap://..." or "ldaps://..." */ - private boolean secureUrls(String[] ldapUrls) { + private static boolean secureUrls(String[] ldapUrls) { if (ldapUrls.length == 0) { return true; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index 754d2a82dd835..0f34850b861b7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -393,7 +393,7 @@ private void validateResponseType(AuthenticationSuccessResponse response) { * @param expectedState The state that was originally generated * @param state The state that was contained in the response */ - private void validateState(State expectedState, State state) { + private static void validateState(State expectedState, State state) { if (null == state) { throw new ElasticsearchSecurityException("Failed to validate the response, the response did not contain a state parameter"); } else if (null == expectedState) { @@ -449,7 +449,7 @@ public void cancelled() { * of the Id Token and call the provided listener. 
* (This method is package-protected for testing purposes) */ - void handleUserinfoResponse( + static void handleUserinfoResponse( HttpResponse httpResponse, JWTClaimsSet verifiedIdTokenClaims, ActionListener claimsListener @@ -523,7 +523,11 @@ void handleUserinfoResponse( /** * Validates that the userinfo response contains a sub Claim and that this claim value is the same as the one returned in the ID Token */ - private void validateUserInfoResponse(JWTClaimsSet userInfoClaims, String expectedSub, ActionListener claimsListener) { + private static void validateUserInfoResponse( + JWTClaimsSet userInfoClaims, + String expectedSub, + ActionListener claimsListener + ) { if (userInfoClaims.getSubject().isEmpty()) { claimsListener.onFailure(new ElasticsearchSecurityException("Userinfo Response did not contain a sub Claim")); } else if (userInfoClaims.getSubject().equals(expectedSub) == false) { @@ -619,7 +623,7 @@ public void cancelled() { * Handle the Token Response from the OpenID Connect Provider. If successful, extract the (yet not validated) Id Token * and access token and call the provided listener. */ - private void handleTokenResponse(HttpResponse httpResponse, ActionListener> tokensListener) { + private static void handleTokenResponse(HttpResponse httpResponse, ActionListener> tokensListener) { try { final HttpEntity entity = httpResponse.getEntity(); final Header encodingHeader = entity.getContentEncoding(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java index efb230f1e06aa..ea5acc1379051 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java @@ -232,7 +232,7 @@ private void buildUserFromClaims(JWTClaimsSet claims, ActionListener return new SamlAttributes(nameId, session, attributes); } - private String getSessionIndex(Assertion assertion) { + private static String getSessionIndex(Assertion assertion) { return assertion.getAuthnStatements().stream().map(as -> as.getSessionIndex()).filter(Objects::nonNull).findFirst().orElse(null); } @@ -166,7 +166,7 @@ private Tuple> extractDetails( throw samlException("No assertions found in SAML response"); } - private void moveToNewDocument(XMLObject xmlObject) { + private static void moveToNewDocument(XMLObject xmlObject) { final Element element = xmlObject.getDOM(); final Document doc = element.getOwnerDocument().getImplementation().createDocument(null, null, null); doc.adoptNode(element); @@ -320,7 +320,10 @@ private void checkSubject(Subject assertionSubject, XMLObject parent, Collection checkSubjectInResponseTo(confirmationData.get(0), allowedSamlRequestIds); } - private void checkSubjectInResponseTo(SubjectConfirmationData subjectConfirmationData, Collection allowedSamlRequestIds) { + private static void checkSubjectInResponseTo( + SubjectConfirmationData subjectConfirmationData, + Collection allowedSamlRequestIds + ) { // Allow for IdP initiated SSO where InResponseTo MUST be missing if (Strings.hasText(subjectConfirmationData.getInResponseTo()) && allowedSamlRequestIds.contains(subjectConfirmationData.getInResponseTo()) == false) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandler.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandler.java index 4b61f13c85fca..6c5e6c2d80e2b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandler.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlLogoutRequestHandler.java @@ -116,7 +116,7 @@ private SAMLObject decrypt(EncryptedID encrypted) { } } - private String getSessionIndex(LogoutRequest logoutRequest) { + private static String getSessionIndex(LogoutRequest logoutRequest) { return logoutRequest.getSessionIndexes().stream().map(as -> as.getValue()).filter(Objects::nonNull).findFirst().orElse(null); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java index dcefcc27937b7..d9ea5ffd27b7b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMessageBuilder.java @@ -67,7 +67,7 @@ protected Issuer buildIssuer() { return issuer; } - protected String buildId() { + protected static String buildId() { // 20 bytes (160 bits) of randomness as recommended by the SAML spec return SamlUtils.generateSecureNCName(20); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java index 80d2697651e6c..106b550a1e23c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java @@ -364,7 +364,7 @@ private static PrivateKey readSigningKey(Path path, char[] password, Terminal te } } - private void validateXml(Terminal terminal, Path xml) throws Exception { + private static void validateXml(Terminal terminal, Path xml) throws Exception { try (InputStream xmlInput = Files.newInputStream(xml)) { SamlUtils.validate(xmlInput, METADATA_SCHEMA); terminal.println(Terminal.Verbosity.VERBOSE, "The generated metadata file conforms to the SAML metadata schema"); @@ -379,7 +379,7 @@ private void validateXml(Terminal terminal, Path xml) throws Exception { } } - private void printExceptions(Terminal terminal, Throwable throwable) { + private static void printExceptions(Terminal terminal, Throwable throwable) { terminal.errorPrintln(" - " + throwable.getMessage()); for (Throwable sup : throwable.getSuppressed()) { printExceptions(terminal, sup); @@ -390,11 +390,11 @@ private void printExceptions(Terminal terminal, Throwable throwable) { } @SuppressForbidden(reason = "CLI tool working from current directory") - private Path resolvePath(String name) { + private static Path resolvePath(String name) { return PathUtils.get(name).normalize(); } - private String requireText(Terminal terminal, String prompt) { + private static String requireText(Terminal terminal, String prompt) { String value = null; while (Strings.isNullOrEmpty(value)) { value = terminal.readText(prompt); @@ -402,7 +402,7 @@ private String requireText(Terminal terminal, String prompt) { return value; } - private T option(OptionSpec spec, OptionSet options, T defaultValue) { + private static T 
option(OptionSpec spec, OptionSet options, T defaultValue) { if (options.has(spec)) { return spec.value(options); } else { @@ -428,7 +428,7 @@ private Map getAttributeNames(OptionSet options, RealmConfig rea } // We sort this Set so that it is deterministic for testing - private SortedSet sorted(Set strings) { + private static SortedSet sorted(Set strings) { return new TreeSet<>(strings); } @@ -491,15 +491,15 @@ private RealmConfig findRealm(Terminal terminal, OptionSet options, Environment } } - private String optionName(OptionSpec spec) { + private static String optionName(OptionSpec spec) { return spec.options().get(0); } - private RealmConfig buildRealm(RealmConfig.RealmIdentifier identifier, Environment env, Settings globalSettings) { + private static RealmConfig buildRealm(RealmConfig.RealmIdentifier identifier, Environment env, Settings globalSettings) { return new RealmConfig(identifier, globalSettings, env, new ThreadContext(globalSettings)); } - private boolean isSamlRealm(RealmConfig.RealmIdentifier realmIdentifier) { + private static boolean isSamlRealm(RealmConfig.RealmIdentifier realmIdentifier) { return SamlRealmSettings.TYPE.equals(realmIdentifier.getType()); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlObjectHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlObjectHandler.java index 064df6a01ab4f..9f35b32f211dd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlObjectHandler.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlObjectHandler.java @@ -135,7 +135,7 @@ private KeyInfoCredentialResolver createResolverForEncryptionKeys() { return new ChainingKeyInfoCredentialResolver(Arrays.asList(localKeyInfoCredentialResolver, collectionKeyInfoCredentialResolver)); } - private EncryptedKeyResolver createResolverForEncryptedKeyElements() { + private static EncryptedKeyResolver createResolverForEncryptedKeyElements() { return new ChainingEncryptedKeyResolver( Arrays.asList( new InlineEncryptedKeyResolver(), @@ -149,7 +149,7 @@ protected SpConfiguration getSpConfiguration() { return sp; } - protected String describe(X509Certificate certificate) { + protected static String describe(X509Certificate certificate) { return "X509Certificate{Subject=" + certificate.getSubjectX500Principal() + "; SerialNo=" @@ -157,7 +157,7 @@ protected String describe(X509Certificate certificate) { + "}"; } - protected String describe(Collection credentials) { + protected static String describe(Collection credentials) { return credentials.stream().map(credential -> describe(credential.getEntityCertificate())).collect(Collectors.joining(",")); } @@ -262,7 +262,7 @@ private ElasticsearchSecurityException samlSignatureException(List c return samlException(msg, signature, describeCredentials(credentials)); } - private String describeCredentials(List credentials) { + private static String describeCredentials(List credentials) { return credentials.stream().map(c -> { if (c == null) { return ""; @@ -325,7 +325,7 @@ T buildXmlObject(Element element, Class type) { } } - protected String text(XMLObject xml, int length) { + protected static String text(XMLObject xml, int length) { return text(xml, length, 0); } @@ -435,7 +435,7 @@ protected byte[] decodeBase64(String content) { } } - protected byte[] inflate(byte[] bytes) { + protected static byte[] inflate(byte[] bytes) { Inflater inflater = new Inflater(true); try ( 
            ByteArrayInputStream in = new ByteArrayInputStream(bytes);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java
index d2b15c9a74249..856fdc4ba9555 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java
@@ -644,7 +644,7 @@ public Map createTokenMetadata(SamlNameId nameId, String session
         return tokenMeta;
     }
 
-    private String resolveSingleValueAttribute(SamlAttributes attributes, AttributeParser parser, String name) {
+    private static String resolveSingleValueAttribute(SamlAttributes attributes, AttributeParser parser, String name) {
         final List list = parser.getAttribute(attributes);
         switch (list.size()) {
             case 0:
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirect.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirect.java
index a883f83d1329f..4195d7d24a082 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirect.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRedirect.java
@@ -74,11 +74,11 @@ private String withParameters(String queryParam) {
         }
     }
 
-    private String base64Encode(byte[] bytes) {
+    private static String base64Encode(byte[] bytes) {
         return Base64.getEncoder().encodeToString(bytes);
     }
 
-    private String urlEncode(String param) {
+    private static String urlEncode(String param) {
         return URLEncoder.encode(param, StandardCharsets.US_ASCII);
     }
 
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlResponseHandler.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlResponseHandler.java
index ed3e88fa22ae6..484af268afca0 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlResponseHandler.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlResponseHandler.java
@@ -40,7 +40,7 @@ protected void checkInResponseTo(StatusResponseType response, Collection
         }
     }
 
-    protected String getStatusCodeMessage(Status status) {
+    protected static String getStatusCodeMessage(Status status) {
         StatusCode firstLevel = status.getStatusCode();
         StatusCode subLevel = firstLevel.getStatusCode();
         StringBuilder sb = new StringBuilder();
@@ -66,7 +66,7 @@ protected String getStatusCodeMessage(Status status) {
         return sb.toString();
     }
 
-    protected void checkResponseDestination(StatusResponseType response, String spConfiguredUrl) {
+    protected static void checkResponseDestination(StatusResponseType response, String spConfiguredUrl) {
         if (spConfiguredUrl.equals(response.getDestination()) == false) {
             if (response.isSigned() || Strings.hasText(response.getDestination())) {
                 throw samlException(
@@ -81,7 +81,7 @@ protected void checkResponseDestination(StatusResponseType response, String spCo
         }
     }
 
-    protected void checkStatus(Status status) {
+    protected static void checkStatus(Status status) {
         if (status == null || status.getStatusCode() == null) {
             throw samlException("SAML Response has no status code");
         }
@@ -90,16 +90,16 @@ protected void checkStatus(Status status) {
         }
     }
 
-    protected boolean isSuccess(Status status) {
+    protected static boolean isSuccess(Status status) {
         return StatusCode.SUCCESS.equals(status.getStatusCode().getValue());
     }
 
-    private String getMessage(Status status) {
+    private static String getMessage(Status status) {
         final StatusMessage sm = status.getStatusMessage();
         return sm == null ? null : sm.getValue();
     }
 
-    private String getDetail(Status status) {
+    private static String getDetail(Status status) {
         final StatusDetail sd = status.getStatusDetail();
         return sd == null ? null : SamlUtils.toString(sd.getDOM());
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilder.java
index 6df1fc571cc9a..7385ce02eb8e4 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilder.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlSpMetadataBuilder.java
@@ -292,7 +292,7 @@ private ServiceName buildServiceName() {
         return name;
     }
 
-    private RequestedAttribute buildRequestedAttribute(String friendlyName, String name) {
+    private static RequestedAttribute buildRequestedAttribute(String friendlyName, String name) {
         final RequestedAttribute attribute = new RequestedAttributeBuilder().buildObject();
         if (Strings.hasText(friendlyName)) {
             attribute.setFriendlyName(friendlyName);
@@ -329,7 +329,7 @@ private List buildKeyDescriptors() throws CertificateEn
         return keys;
     }
 
-    private KeyDescriptor buildKeyDescriptor(X509Certificate certificate, UsageType usageType) throws CertificateEncodingException {
+    private static KeyDescriptor buildKeyDescriptor(X509Certificate certificate, UsageType usageType) throws CertificateEncodingException {
         final KeyDescriptor descriptor = new KeyDescriptorBuilder().buildObject();
         descriptor.setUse(usageType);
         final KeyInfo keyInfo = new KeyInfoBuilder().buildObject();
@@ -357,7 +357,7 @@ private Organization buildOrganization() {
         return org;
     }
 
-    private ContactPerson buildContact(ContactInfo contact) {
+    private static ContactPerson buildContact(ContactInfo contact) {
         final GivenName givenName = new GivenNameBuilder().buildObject();
         givenName.setValue(contact.givenName);
         final SurName surName = new SurNameBuilder().buildObject();
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java
index a16eeb7cf4e0f..7749500465f65 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java
@@ -253,7 +253,7 @@ void deleteToken(DeleteServiceAccountTokenRequest request, ActionListener listener) {
+    public static void getRoleDescriptor(Authentication authentication, ActionListener listener) {
         assert authentication.isServiceAccount() : "authentication is not for service account: " + authentication;
         final String principal = authentication.getEffectiveSubject().getUser().principal();
         getRoleDescriptorForPrincipal(principal, listener);
     }
 
-    public void getRoleDescriptorForPrincipal(String principal, ActionListener listener) {
+    public static void getRoleDescriptorForPrincipal(String principal, ActionListener listener) {
         final ServiceAccount account = ACCOUNTS.get(principal);
         if (account == null) {
             listener.onFailure(
@@ -179,7 +179,7 @@ public void getRoleDescriptorForPrincipal(String principal, ActionListener> lookup.lookup(username, userListener);
     }
 
-    private List resolveRealms(Iterable allRealms, List lookupRealms) {
+    private static List resolveRealms(Iterable allRealms, List lookupRealms) {
         final List result = new ArrayList<>(lookupRealms.size());
         for (String name : lookupRealms) {
             result.add(findRealm(name, allRealms));
         }
@@ -150,7 +150,7 @@ private List resolveRealms(Iterable allRealms, List delegatedRealms, Settings globalSettings) {
+    private static void checkForRealmChains(Iterable delegatedRealms, Settings globalSettings) {
         for (Realm realm : delegatedRealms) {
             Setting> realmAuthzSetting = AUTHZ_REALMS.apply(realm.type()).getConcreteSettingForNamespace(realm.name());
             if (realmAuthzSetting.exists(globalSettings)) {
@@ -165,7 +165,7 @@ private void checkForRealmChains(Iterable delegatedRealms, Settings globa
         }
     }
 
-    private Realm findRealm(String name, Iterable allRealms) {
+    private static Realm findRealm(String name, Iterable allRealms) {
         for (Realm realm : allRealms) {
             if (name.equals(realm.name())) {
                 return realm;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowService.java
index 8b23640f9ef94..e035d7fdabbac 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowService.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowService.java
@@ -23,7 +23,7 @@ public class WorkflowService {
 
     private static final String WORKFLOW_HEADER = "_xpack_security_workflow";
 
-    public Workflow resolveWorkflowAndStoreInThreadContext(RestHandler restHandler, ThreadContext threadContext) {
+    public static Workflow resolveWorkflowAndStoreInThreadContext(RestHandler restHandler, ThreadContext threadContext) {
         Workflow workflow = resolveWorkflow(restHandler);
         if (workflow != null) {
             assert threadContext.getHeader(WORKFLOW_HEADER) == null
@@ -33,11 +33,11 @@ public Workflow resolveWorkflowAndStoreInThreadContext(RestHandler restHandler,
         return workflow;
     }
 
-    public String readWorkflowFromThreadContext(ThreadContext threadContext) {
+    public static String readWorkflowFromThreadContext(ThreadContext threadContext) {
         return threadContext.getHeader(WORKFLOW_HEADER);
     }
 
-    private Workflow resolveWorkflow(RestHandler restHandler) {
+    private static Workflow resolveWorkflow(RestHandler restHandler) {
         final String restHandlerName = resolveRestHandlerName(restHandler);
         if (restHandlerName == null) {
             logger.trace(() -> format("unable to resolve name of REST handler [%s]", restHandler.getClass()));
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
index f4dbadbed588d..ab9ea8f772054 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java
@@ -206,7 +206,7 @@ public void getRole(Subject subject, ActionListener roleActionListener) {
         assert false == subject.getUser() instanceof InternalUser : "Internal user [" + subject.getUser() + "] should not pass here";
         final RoleReferenceIntersection roleReferenceIntersection = subject.getRoleReferenceIntersection(anonymousUser);
-        final String workflow = workflowService.readWorkflowFromThreadContext(threadContext);
+        final String workflow = WorkflowService.readWorkflowFromThreadContext(threadContext);
         roleReferenceIntersection.buildRole(
             this::buildRoleFromRoleReference,
             roleActionListener.delegateFailureAndWrap((l, role) -> l.onResponse(role.forWorkflow(workflow)))
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java
index bb737d094115c..ad4d0d4434622 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java
@@ -130,7 +130,7 @@ public void resolveServiceAccountRoleReference(
         RoleReference.ServiceAccountRoleReference roleReference,
         ActionListener listener
     ) {
-        serviceAccountService.getRoleDescriptorForPrincipal(roleReference.getPrincipal(), listener.map(roleDescriptor -> {
+        ServiceAccountService.getRoleDescriptorForPrincipal(roleReference.getPrincipal(), listener.map(roleDescriptor -> {
             final RolesRetrievalResult rolesRetrievalResult = new RolesRetrievalResult();
             rolesRetrievalResult.addDescriptors(Set.of(roleDescriptor));
             return rolesRetrievalResult;
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java
index eff410bb2f06a..0a899bd70e619 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGenerator.java
@@ -75,7 +75,7 @@ protected EnrollmentToken create(String user, SecureString password, String acti
         return new EnrollmentToken(apiKey, fingerprint, httpInfo.v2(), httpInfo.v1());
     }
 
-    private HttpResponse.HttpResponseBuilder responseBuilder(InputStream is) throws IOException {
+    private static HttpResponse.HttpResponseBuilder responseBuilder(InputStream is) throws IOException {
         final HttpResponse.HttpResponseBuilder httpResponseBuilder = new HttpResponse.HttpResponseBuilder();
         if (is != null) {
             String responseBody = Streams.readFully(is).utf8ToString();
@@ -87,11 +87,11 @@ private HttpResponse.HttpResponseBuilder responseBuilder(InputStream is) throws
         return httpResponseBuilder;
     }
 
-    protected URL createAPIKeyUrl(URL baseUrl) throws MalformedURLException, URISyntaxException {
+    protected static URL createAPIKeyUrl(URL baseUrl) throws MalformedURLException, URISyntaxException {
         return new URL(baseUrl, (baseUrl.toURI().getPath() + "/_security/api_key").replaceAll("/+", "/"));
     }
 
-    protected URL getHttpInfoUrl(URL baseUrl) throws MalformedURLException, URISyntaxException {
+    protected static URL getHttpInfoUrl(URL baseUrl) throws MalformedURLException, URISyntaxException {
         return new URL(baseUrl, (baseUrl.toURI().getPath() + "/_nodes/_local/http").replaceAll("/+", "/"));
     }
 
@@ -134,7 +134,7 @@ protected String getApiKeyCredentials(String user, SecureString password, String
             user,
             password,
             createApiKeyRequestBodySupplier,
-            is -> responseBuilder(is)
+            ExternalEnrollmentTokenGenerator::responseBuilder
         );
         final int httpCode = httpResponseApiKey.getHttpStatus();
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java
index 85d8ad5305777..f7d5ada9b9538 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java
@@ -35,7 +35,6 @@ public class SecurityRestFilter extends FilterRestHandler implements RestHandler
     private final AuditTrailService auditTrailService;
     private final boolean enabled;
     private final ThreadContext threadContext;
-    private final WorkflowService workflowService;
     private final OperatorPrivileges.OperatorPrivilegesService operatorPrivilegesService;
 
     public SecurityRestFilter(
@@ -43,7 +42,6 @@ public SecurityRestFilter(
         ThreadContext threadContext,
         SecondaryAuthenticator secondaryAuthenticator,
         AuditTrailService auditTrailService,
-        WorkflowService workflowService,
         RestHandler restHandler,
         OperatorPrivileges.OperatorPrivilegesService operatorPrivilegesService
     ) {
@@ -52,7 +50,6 @@ public SecurityRestFilter(
         this.threadContext = threadContext;
         this.secondaryAuthenticator = secondaryAuthenticator;
         this.auditTrailService = auditTrailService;
-        this.workflowService = workflowService;
         // can be null if security is not enabled
         this.operatorPrivilegesService = operatorPrivilegesService == null
             ? OperatorPrivileges.NOOP_OPERATOR_PRIVILEGES_SERVICE
@@ -83,7 +80,8 @@ public void handleRequest(RestRequest request, RestChannel channel, NodeClient c
                 if (secondaryAuthentication != null) {
                     logger.trace("Found secondary authentication {} in REST request [{}]", secondaryAuthentication, request.uri());
                 }
-                workflowService.resolveWorkflowAndStoreInThreadContext(getConcreteRestHandler(), threadContext);
+                WorkflowService.resolveWorkflowAndStoreInThreadContext(getConcreteRestHandler(), threadContext);
+
                 doHandleRequest(request, channel, client);
             }, e -> handleException(request, channel, e)));
     }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java
index cdc2e7caceedc..3102ba4e4ee36 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenAction.java
@@ -165,7 +165,7 @@ public void onFailure(Exception e) {
         }
     }
 
-    private String extractBase64EncodedToken(ElasticsearchSecurityException e) {
+    private static String extractBase64EncodedToken(ElasticsearchSecurityException e) {
         String base64EncodedToken = null;
         List values = e.getHeader(KerberosAuthenticationToken.WWW_AUTHENTICATE);
         if (values != null && values.size() == 1) {
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java
index 9a47ff429c48c..09527b251b6e8 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/saml/RestSamlAuthenticateAction.java
@@ -115,7 +115,7 @@ public RestResponse buildResponse(SamlAuthenticateResponse response, XContentBui
         }
     }
 
-    private byte[] decodeBase64(String content) {
+    private static byte[] decodeBase64(String content) {
         content = content.replaceAll("\\s+", "");
         try {
             return Base64.getDecoder().decode(content);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java
index dfedc2c659033..9809127080dc5 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/CrossClusterAccessServerTransportFilter.java
@@ -108,7 +108,7 @@ private void validateHeaders() {
         }
     }
 
-    private void ensureRequiredHeaderInContext(ThreadContext threadContext, String requiredHeader) {
+    private static void ensureRequiredHeaderInContext(ThreadContext threadContext, String requiredHeader) {
         if (threadContext.getHeader(requiredHeader) == null) {
             throw CrossClusterAccessAuthenticationService.requiredHeaderMissingException(requiredHeader);
         }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
index 75126b205dcd6..bfd87326d4481 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
@@ -40,7 +40,6 @@ class ServerTransportFilter {
 
     private final AuthenticationService authcService;
     private final AuthorizationService authzService;
-    private final SecurityActionMapper actionMapper = new SecurityActionMapper();
     private final ThreadContext threadContext;
     private final boolean extractClientCert;
     private final DestructiveOperations destructiveOperations;
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java
index a0a1b622cf36e..48184a4e3a9df 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java
@@ -2192,7 +2192,7 @@ public void testMaybeBuildUpdatedDocument() throws IOException {
         when(request.getMetadata()).thenReturn(newMetadata);
         final var service = createApiKeyService();
 
-        final XContentBuilder builder = service.maybeBuildUpdatedDocument(
+        final XContentBuilder builder = ApiKeyService.maybeBuildUpdatedDocument(
             apiKeyId,
             oldApiKeyDoc,
             newVersion,
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java
index 60f68d4104a99..057a55ea4708d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java
@@ -935,7 +935,7 @@ public void testHandleUserinfoResponseSuccess() throws Exception {
         final PlainActionFuture future = new PlainActionFuture<>();
         this.authenticator = buildAuthenticator();
-        this.authenticator.handleUserinfoResponse(response, idClaims, future);
+        OpenIdConnectAuthenticator.handleUserinfoResponse(response, idClaims, future);
 
         final JWTClaimsSet finalClaims = future.get();
         assertThat(finalClaims.getSubject(), equalTo(sub));
@@ -957,7 +957,7 @@ public void testHandleUserinfoResponseFailure() throws Exception {
         final PlainActionFuture future = new PlainActionFuture<>();
         this.authenticator = buildAuthenticator();
-        this.authenticator.handleUserinfoResponse(response, idClaims, future);
+        OpenIdConnectAuthenticator.handleUserinfoResponse(response, idClaims, future);
 
         final ElasticsearchSecurityException exception = expectThrows(ElasticsearchSecurityException.class, future::actionGet);
         assertThat(
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java
index cdf847e4356f7..0c40e3996d288 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/ServiceAccountServiceTests.java
@@ -579,7 +579,7 @@ public void testGetRoleDescriptor() throws ExecutionException, InterruptedExcept
         );
 
         final PlainActionFuture future1 = new PlainActionFuture<>();
-        serviceAccountService.getRoleDescriptor(auth1, future1);
+        ServiceAccountService.getRoleDescriptor(auth1, future1);
         final RoleDescriptor roleDescriptor1 = future1.get();
         assertNotNull(roleDescriptor1);
         assertThat(roleDescriptor1.getName(), equalTo("elastic/fleet-server"));
@@ -594,7 +594,7 @@ public void testGetRoleDescriptor() throws ExecutionException, InterruptedExcept
             Map.of("_token_name", randomAlphaOfLengthBetween(3, 8), "_token_source", tokenSource.name().toLowerCase(Locale.ROOT))
         );
         final PlainActionFuture future2 = new PlainActionFuture<>();
-        serviceAccountService.getRoleDescriptor(auth2, future2);
+        ServiceAccountService.getRoleDescriptor(auth2, future2);
         final ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, future2::actionGet);
         assertThat(e.getMessage(), containsString("cannot load role for service account [" + username + "] - no such service account"));
     }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java
index d60858234e8fb..2420d7c2269a6 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java
@@ -1663,7 +1663,7 @@ public void testGetRoleDescriptorsForRemoteClusterForReservedRoles() {
         // superuser
         {
             final SimpleRole role = Role.buildFromRoleDescriptor(
-                reservedRolesStore.roleDescriptor("superuser"),
+                ReservedRolesStore.roleDescriptor("superuser"),
                 fieldPermissionsCache,
                 RESTRICTED_INDICES
             );
@@ -1699,7 +1699,7 @@ public void testGetRoleDescriptorsForRemoteClusterForReservedRoles() {
         // kibana_system
         {
             final SimpleRole role = Role.buildFromRoleDescriptor(
-                reservedRolesStore.roleDescriptor("kibana_system"),
+                ReservedRolesStore.roleDescriptor("kibana_system"),
                 fieldPermissionsCache,
                 RESTRICTED_INDICES
             );
@@ -1735,7 +1735,7 @@ public void testGetRoleDescriptorsForRemoteClusterForReservedRoles() {
         // monitoring_user
         {
             final SimpleRole role = Role.buildFromRoleDescriptor(
-                reservedRolesStore.roleDescriptor("monitoring_user"),
+                ReservedRolesStore.roleDescriptor("monitoring_user"),
                 fieldPermissionsCache,
                 RESTRICTED_INDICES
             );
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowServiceTests.java
index 437ea041d4ee6..1fe0a9f8c7999 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/restriction/WorkflowServiceTests.java
@@ -26,16 +26,14 @@ public class WorkflowServiceTests extends ESTestCase {
 
-    private final WorkflowService workflowService = new WorkflowService();
-
     public void testResolveWorkflowAndStoreInThreadContextWithKnownRestHandler() {
         final Workflow expectedWorkflow = randomFrom(WorkflowResolver.allWorkflows());
         final RestHandler restHandler = new TestBaseRestHandler(randomFrom(expectedWorkflow.allowedRestHandlers()));
         final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
 
-        final Workflow actualWorkflow = workflowService.resolveWorkflowAndStoreInThreadContext(restHandler, threadContext);
+        final Workflow actualWorkflow = WorkflowService.resolveWorkflowAndStoreInThreadContext(restHandler, threadContext);
         assertThat(actualWorkflow, equalTo(expectedWorkflow));
-        assertThat(workflowService.readWorkflowFromThreadContext(threadContext), equalTo(expectedWorkflow.name()));
+        assertThat(WorkflowService.readWorkflowFromThreadContext(threadContext), equalTo(expectedWorkflow.name()));
     }
 
     public void testResolveWorkflowAndStoreInThreadContextWithUnknownRestHandler() {
@@ -51,9 +49,9 @@ public void testResolveWorkflowAndStoreInThreadContextWithUnknownRestHandler() {
         }
         final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
 
-        final Workflow actualWorkflow = workflowService.resolveWorkflowAndStoreInThreadContext(restHandler, threadContext);
+        final Workflow actualWorkflow = WorkflowService.resolveWorkflowAndStoreInThreadContext(restHandler, threadContext);
         assertThat(actualWorkflow, nullValue());
-        assertThat(workflowService.readWorkflowFromThreadContext(threadContext), nullValue());
+        assertThat(WorkflowService.readWorkflowFromThreadContext(threadContext), nullValue());
     }
 
     public static class TestBaseRestHandler extends BaseRestHandler {
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
index 46a78f1055a6f..8a042a5921a45 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java
@@ -2267,7 +2267,7 @@ public void testGetRoleForWorkflowWithRestriction() {
         // Tests that for a role with restriction, getRole returns:
        // 1. a usable role when originating workflow matches
         try (var ignored = threadContext.stashContext()) {
-            workflowService.resolveWorkflowAndStoreInThreadContext(
+            WorkflowService.resolveWorkflowAndStoreInThreadContext(
                 new TestBaseRestHandler(randomFrom(workflow.allowedRestHandlers())),
                 threadContext
             );
@@ -2282,7 +2282,7 @@
 
         // 2. an "empty-restricted" role if originating workflow does not match (or is null)
         try (var ignored = threadContext.stashContext()) {
-            workflowService.resolveWorkflowAndStoreInThreadContext(new TestBaseRestHandler(randomAlphaOfLength(10)), threadContext);
+            WorkflowService.resolveWorkflowAndStoreInThreadContext(new TestBaseRestHandler(randomAlphaOfLength(10)), threadContext);
 
             final PlainActionFuture future1 = new PlainActionFuture<>();
             compositeRolesStore.getRole(authentication1.getEffectiveSubject(), future1);
@@ -2377,7 +2377,7 @@ public void testGetRoleForWorkflowWithoutRestriction() {
         try (var ignored = threadContext.stashContext()) {
             boolean useExistingWorkflowAsOriginating = randomBoolean();
             Workflow existingWorkflow = randomFrom(WorkflowResolver.allWorkflows());
-            workflowService.resolveWorkflowAndStoreInThreadContext(
+            WorkflowService.resolveWorkflowAndStoreInThreadContext(
                 new TestBaseRestHandler(
                     useExistingWorkflowAsOriginating ? randomFrom(existingWorkflow.allowedRestHandlers()) : randomAlphaOfLengthBetween(4, 8)
                 ),
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java
index 339b3de9ecb49..669f67d80c1f8 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java
@@ -85,8 +85,8 @@ public void testCreateSuccess() throws Exception {
         final CommandLineHttpClient client = mock(CommandLineHttpClient.class);
         final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client);
         final URL baseURL = new URL("http://localhost:9200");
-        final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
-        final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
+        final URL createAPIKeyURL = ExternalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
+        final URL getHttpInfoURL = ExternalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
 
         final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>());
         when(client.execute(anyString(), any(URL.class), anyString(), any(SecureString.class), anyCheckedSupplier(), anyCheckedFunction()))
@@ -174,7 +174,7 @@ public void testFailedCreateApiKey() throws Exception {
         final CommandLineHttpClient client = mock(CommandLineHttpClient.class);
         final URL baseURL = new URL("http://localhost:9200");
         final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client);
-        final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
+        final URL createAPIKeyURL = ExternalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
 
         final HttpResponse httpResponseNotOK = new HttpResponse(HttpURLConnection.HTTP_BAD_REQUEST, new HashMap<>());
         when(
@@ -200,8 +200,8 @@ public void testFailedRetrieveHttpInfo() throws Exception {
         final CommandLineHttpClient client = mock(CommandLineHttpClient.class);
         final URL baseURL = new URL("http://localhost:9200");
         final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client);
-        final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
-        final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
+        final URL createAPIKeyURL = ExternalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
+        final URL getHttpInfoURL = ExternalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
 
         final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>());
         when(
@@ -278,8 +278,8 @@ public void testFailedNoCaInKeystore() throws Exception {
         final CommandLineHttpClient client = mock(CommandLineHttpClient.class);
         final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client);
         final URL baseURL = new URL("http://localhost:9200");
-        final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
-        final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
+        final URL createAPIKeyURL = ExternalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
+        final URL getHttpInfoURL = ExternalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
 
         final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>());
         when(
@@ -362,8 +362,8 @@ public void testFailedManyCaInKeystore() throws Exception {
         final CommandLineHttpClient client = mock(CommandLineHttpClient.class);
         final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client);
         final URL baseURL = new URL("http://localhost:9200");
-        final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
-        final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
+        final URL createAPIKeyURL = ExternalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL);
+        final URL getHttpInfoURL = ExternalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL);
 
         final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>());
         when(
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java
index 2547839f59b56..69884cd1e6dbd 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java
@@ -102,7 +102,6 @@ private SecurityRestFilter getFilter(OperatorPrivileges.OperatorPrivilegesServic
             threadContext,
             secondaryAuthenticator,
             new AuditTrailService(null, null),
-            new WorkflowService(),
             restHandler,
             privilegesService
         );
@@ -171,15 +170,7 @@ public void testProcessSecondaryAuthentication() throws Exception {
     }
 
     public void testProcessWithSecurityDisabled() throws Exception {
-        filter = new SecurityRestFilter(
-            false,
-            threadContext,
-            secondaryAuthenticator,
-            mock(AuditTrailService.class),
-            mock(WorkflowService.class),
-            restHandler,
-            null
-        );
+        filter = new SecurityRestFilter(false, threadContext, secondaryAuthenticator, mock(AuditTrailService.class), restHandler, null);
         assertEquals(NOOP_OPERATOR_PRIVILEGES_SERVICE, filter.getOperatorPrivilegesService());
         RestRequest request = mock(RestRequest.class);
         filter.handleRequest(request, channel, null);
@@ -231,7 +222,6 @@ public Set getFilteredFields() {
             threadContext,
             secondaryAuthenticator,
             new AuditTrailService(auditTrail, licenseState),
-            new WorkflowService(),
             restHandler,
             NOOP_OPERATOR_PRIVILEGES_SERVICE
         );
@@ -298,19 +288,11 @@ public void testProcessWithWorkflow() throws Exception {
         restHandler = new TestBaseRestHandler(randomFrom(workflow.allowedRestHandlers()));
 
         final WorkflowService workflowService = new WorkflowService();
-        filter = new SecurityRestFilter(
-            true,
-            threadContext,
-            secondaryAuthenticator,
-            new AuditTrailService(null, null),
-            workflowService,
-            restHandler,
-            null
-        );
+        filter = new SecurityRestFilter(true, threadContext, secondaryAuthenticator, new AuditTrailService(null, null), restHandler, null);
 
         RestRequest request = mock(RestRequest.class);
         filter.handleRequest(request, channel, null);
-        assertThat(workflowService.readWorkflowFromThreadContext(threadContext), equalTo(workflow.name()));
+        assertThat(WorkflowService.readWorkflowFromThreadContext(threadContext), equalTo(workflow.name()));
     }
 
     public void testProcessWithoutWorkflow() throws Exception {
@@ -325,19 +307,11 @@ public void testProcessWithoutWorkflow() throws Exception {
         }
 
         final WorkflowService workflowService = new WorkflowService();
-        filter = new SecurityRestFilter(
-            true,
-            threadContext,
-            secondaryAuthenticator,
-            new AuditTrailService(null, null),
-            workflowService,
-            restHandler,
-            null
-        );
+        filter = new SecurityRestFilter(true, threadContext, secondaryAuthenticator, new AuditTrailService(null, null), restHandler, null);
 
         RestRequest request = mock(RestRequest.class);
         filter.handleRequest(request, channel, null);
-        assertThat(workflowService.readWorkflowFromThreadContext(threadContext), nullValue());
+        assertThat(WorkflowService.readWorkflowFromThreadContext(threadContext), nullValue());
     }
 
     public void testCheckRest() throws Exception {
diff --git a/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java b/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java
index 4887419507c6f..5e93003b04aa9 100644
--- a/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java
+++ b/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java
@@ -180,7 +180,7 @@ && isSnapshotVersionCompatible(latestSnapshot)
         );
     }
 
-    private boolean isSnapshotVersionCompatible(ShardSnapshot snapshot) {
+    private static boolean isSnapshotVersionCompatible(ShardSnapshot snapshot) {
         IndexVersion commitVersion = snapshot.getCommitVersion();
         // if the snapshotVersion == null that means that the snapshot was taken in a version <= 7.15,
         // therefore we can safely use that snapshot. Since this runs on the shard primary and
@@ -193,7 +193,7 @@ private boolean isSnapshotVersionCompatible(ShardSnapshot snapshot) {
         return commitVersion.onOrBefore(IndexVersion.current());
     }
 
-    private ShardRecoveryPlan getRecoveryPlanUsingSourceNode(
+    private static ShardRecoveryPlan getRecoveryPlanUsingSourceNode(
         Store.MetadataSnapshot sourceMetadata,
         Store.RecoveryDiff sourceTargetDiff,
         List filesMissingInTarget,
@@ -233,7 +233,7 @@ public void onFailure(Exception e) {
         shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, listenerIgnoringErrors);
     }
 
-    private Store.MetadataSnapshot toMetadataSnapshot(List files) {
+    private static Store.MetadataSnapshot toMetadataSnapshot(List files) {
         return new Store.MetadataSnapshot(
             files.stream().collect(Collectors.toMap(StoreFileMetadata::name, Function.identity())),
             emptyMap(),
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/CustomDateFormatTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/CustomDateFormatTestCase.java
index 861d6f1582321..3f70c52df64b0 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/CustomDateFormatTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/CustomDateFormatTestCase.java
@@ -71,7 +71,7 @@ public void testCustomDateFormatsWithNowFunctions() throws IOException {
         }
     }
 
-    private void createIndex() throws IOException {
+    private static void createIndex() throws IOException {
         Request request = new Request("PUT", "/test");
 
         XContentBuilder index = JsonXContent.contentBuilder().prettyPrint().startObject();
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/FieldExtractorTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/FieldExtractorTestCase.java
index 23c7579c8716c..253736dfa01a0 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/FieldExtractorTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/FieldExtractorTestCase.java
@@ -1271,7 +1271,7 @@ private enum NestedFieldType {
         LEAF;
     }
 
-    private void addField(
+    private static void addField(
         XContentBuilder index,
         boolean nestedFieldAdded,
         int remainingFields,
@@ -1342,16 +1342,19 @@ private void addField(
         }
     }
 
-    private boolean shouldAddNestedField() {
+    private static boolean shouldAddNestedField() {
         return randomBoolean();
     }
 
-    private void createIndexWithFieldTypeAndAlias(String type, Map> fieldProps, Map indexProps)
-        throws IOException {
+    private static void createIndexWithFieldTypeAndAlias(
+        String type,
+        Map> fieldProps,
+        Map indexProps
+    ) throws IOException {
         createIndexWithFieldTypeAndProperties(type, fieldProps, indexProps, true, false, null);
     }
 
-    private void createIndexWithFieldTypeAndProperties(
+    private static void createIndexWithFieldTypeAndProperties(
         String type,
         Map> fieldProps,
         Map indexProps
@@ -1359,7 +1362,7 @@ private void createIndexWithFieldTypeAndProperties(
         createIndexWithFieldTypeAndProperties(type, fieldProps, indexProps, false, false, null);
     }
 
-    private void createIndexWithFieldTypeAndSubFields(
+    private static void createIndexWithFieldTypeAndSubFields(
         String type,
         Map> fieldProps,
         Map indexProps,
@@ -1369,7 +1372,7 @@ private void createIndexWithFieldTypeAndSubFields(
         createIndexWithFieldTypeAndProperties(type, fieldProps, indexProps, false, true, subFieldsProps, subFieldsTypes);
     }
 
-    private void createIndexWithFieldTypeAndProperties(
+    private static void createIndexWithFieldTypeAndProperties(
         String type,
         Map> fieldProps,
         Map indexProps,
@@ -1450,7 +1453,7 @@ private void createIndexWithFieldTypeAndProperties(
         client().performRequest(request);
     }
 
-    private Request buildRequest(String query) {
+    private static Request buildRequest(String query) {
         Request request = new Request("POST", RestSqlTestCase.SQL_QUERY_REST_ENDPOINT);
         request.addParameter("error_trace", "true");
         request.addParameter("pretty", "true");
@@ -1459,14 +1462,14 @@ private Request buildRequest(String query) {
         return request;
     }
 
-    private Map runSql(String query) throws IOException {
+    private static Map runSql(String query) throws IOException {
         Response response = client().performRequest(buildRequest(query));
         try (InputStream content = response.getEntity().getContent()) {
             return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
         }
     }
 
-    private JDBCType jdbcTypeFor(String esType) {
+    private static JDBCType jdbcTypeFor(String esType) {
         return switch (esType) {
             case "long" -> JDBCType.BIGINT;
             case "integer" -> JDBCType.INTEGER;
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java
index 88fbf4112c611..70f4c932b7142 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java
@@ -206,7 +206,7 @@ public void testFloatingPointNumbersReturnTypes() throws IOException {
     }
 
     @SuppressWarnings({ "unchecked" })
-    private void assertFloatingPointNumbersReturnTypes(Request request, Mode mode) throws IOException {
+    private static void assertFloatingPointNumbersReturnTypes(Request request, Mode mode) throws IOException {
         String requestContent = query(
             "SELECT "
                 + "CAST(1234.34 AS REAL) AS float_positive,"
@@ -249,12 +249,19 @@ private void assertFloatingPointNumbersReturnTypes(Request request, Mode mode) t
         assertEquals(row.get(3), -1234567890123.34d);
     }
 
-    private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize) throws IOException {
+    private static void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize)
+        throws IOException {
         assertQuery(sql, columnName, columnType, columnValue, null, displaySize);
     }
 
-    private void assertQuery(String sql, String columnName, String columnType, Object columnValue, Object cliColumnValue, int displaySize)
-        throws IOException {
+    private static void assertQuery(
+        String sql,
+        String columnName,
+        String columnType,
+        Object columnValue,
+        Object cliColumnValue,
+        int displaySize
+    ) throws IOException {
         for (Mode mode : Mode.values()) {
             boolean isCliCheck = mode == CLI && cliColumnValue != null;
             assertQuery(sql, columnName, columnType, isCliCheck ? cliColumnValue : columnValue, displaySize, mode);
@@ -262,7 +269,7 @@ private void assertQuery(String sql, String columnName, String columnType, Objec
     }
 
     @SuppressWarnings({ "unchecked" })
-    private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize, Mode mode)
+    private static void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize, Mode mode)
         throws IOException {
         boolean columnar = randomBoolean();
         Map response = runSql(mode, sql, columnar);
@@ -293,7 +300,7 @@ private void assertQuery(String sql, String columnName, String columnType, Objec
         }
     }
 
-    private Map runSql(Mode mode, String sql, boolean columnar) throws IOException {
+    private static Map runSql(Mode mode, String sql, boolean columnar) throws IOException {
         Request request = new Request("POST", SQL_QUERY_REST_ENDPOINT);
         String requestContent = query(sql).mode(mode).toString();
         String format = randomFrom(XContentType.JSON, XContentType.SMILE, XContentType.CBOR, XContentType.YAML).name()
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/EmbeddedCli.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/EmbeddedCli.java
index 9636129c5f25c..1e3de6317d239 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/EmbeddedCli.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/EmbeddedCli.java
@@ -274,7 +274,7 @@ public String command(String command) throws IOException {
      * Create the "echo" that we expect jLine to send to the terminal
      * while we're typing a command.
      */
-    private List expectedCommandEchos(String command) {
+    private static List expectedCommandEchos(String command) {
         List commandLines = Arrays.stream(command.split("\n")).filter(s -> s.isEmpty() == false).toList();
         List result = new ArrayList<>(commandLines.size() * 2);
         result.add("[?1h=[?2004h[33msql> [0m" + commandLines.get(0));
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/PartialResultsTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/PartialResultsTestCase.java
index 163284cada329..bebbbc16f631f 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/PartialResultsTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/cli/PartialResultsTestCase.java
@@ -17,7 +17,7 @@ public abstract class PartialResultsTestCase extends CliIntegrationTestCase {
 
-    private void createTestIndex(int okShards, int badShards) throws IOException {
+    private static void createTestIndex(int okShards, int badShards) throws IOException {
         final String mappingTemplate = """
             {
               "aliases": {
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlPaginationTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlPaginationTestCase.java
index 7e298b2ad3e5d..72871aadce2b5 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlPaginationTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlPaginationTestCase.java
@@ -62,7 +62,7 @@ public void testPaginationIsConsistentWithPivotQueries() throws Exception {
         assertNoSearchContexts(client());
     }
 
-    private String fetchRemainingPages(String cursor, String format) throws IOException {
+    private static String fetchRemainingPages(String cursor, String format) throws IOException {
         StringBuilder result = new StringBuilder();
         while (cursor != null) {
             Tuple response = runSqlAsText(cursor(cursor), format);
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
index e47c3afe6a776..81cc54db19669 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java
@@ -1759,7 +1759,7 @@ public void testFetchAllPagesListCursor(String format) throws IOException {
      * 2. There are at most `expectedValues.size() / pageSize + 1` pages (the last one might or might not be empty)
      * 3. Optionally: That the last page is not empty.
     */
-    private void testFetchAllPages(String format, String query, List expectedValues, int pageSize, boolean emptyLastPage)
+    private static void testFetchAllPages(String format, String query, List expectedValues, int pageSize, boolean emptyLastPage)
         throws IOException {
         int remainingPages = expectedValues.size() / pageSize + 1;
@@ -1801,7 +1801,7 @@ public void testDataStreamInShowTablesFiltered() throws IOException {
         expectDataStreamInShowTables(dataStreamName, "SHOW TABLES \\\"" + dataStreamName + "*\\\"");
     }
 
-    private void expectDataStreamInShowTables(String dataStreamName, String sql) throws IOException {
+    private static void expectDataStreamInShowTables(String dataStreamName, String sql) throws IOException {
         try {
             createDataStream(dataStreamName);
diff --git a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java
index c979feba781c7..626679eaf6669 100644
--- a/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java
+++ b/x-pack/plugin/sql/qa/server/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java
@@ -310,7 +310,7 @@ private void index(List docs) throws IOException {
         client().performRequest(request);
     }
 
-    private Map getStats() throws UnsupportedOperationException, IOException {
+    private static Map getStats() throws UnsupportedOperationException, IOException {
         Request request = new Request("GET", SQL_STATS_REST_ENDPOINT);
         Map responseAsMap;
         try (InputStream content = client().performRequest(request).getEntity().getContent()) {
@@ -320,7 +320,7 @@ private Map getStats() throws UnsupportedOperationException, IOE
         return responseAsMap;
     }
 
-    private void runTranslate(String sql) throws IOException {
+    private static void runTranslate(String sql) throws IOException {
         Request request = new Request("POST", SQL_TRANSLATE_REST_ENDPOINT);
         if (randomBoolean()) {
             // We default to JSON but we force it randomly for extra coverage
@@ -349,7 +349,7 @@ private String scroll(String cursor) throws IOException {
     }
 
     @SuppressWarnings({ "unchecked", "rawtypes" })
-    private void assertTranslateQueryMetric(int expected, Map responseAsMap) throws IOException {
+    private static void assertTranslateQueryMetric(int expected, Map responseAsMap) throws IOException {
         List>> nodesListStats = (List) responseAsMap.get("stats");
         int actualMetricValue = 0;
         for (Map perNodeStats : nodesListStats) {
@@ -384,7 +384,7 @@ private String runSql(String sql, Integer fetchSize) throws IOException {
     }
 
    @SuppressWarnings({ "unchecked", "rawtypes" })
-    private void assertFeatureMetric(int expected, Map responseAsMap, String feature) throws IOException {
+    private static void assertFeatureMetric(int expected, Map responseAsMap, String feature) throws IOException {
         List> nodesListStats = (List>) responseAsMap.get("stats");
         int actualMetricValue = 0;
         for (Map perNodeStats : nodesListStats) {
@@ -395,7 +395,8 @@ private void assertFeatureMetric(int expected, Map responseAsMap
     }
 
     @SuppressWarnings({ "unchecked", "rawtypes" })
-    private void assertQueryMetric(int expected, Map responseAsMap, String queryType, String metric) throws IOException {
+    private static void assertQueryMetric(int expected, Map responseAsMap, String queryType, String metric)
+        throws IOException {
         List>> nodesListStats = (List) responseAsMap.get("stats");
         int actualMetricValue = 0;
         for (Map perNodeStats : nodesListStats) {
@@ -410,7 +411,7 @@ private void assertClientTypeQueryMetric(int expected, Map respo
         assertQueryMetric(expected, responseAsMap, clientType, metric);
     }
 
-    private void assertAllQueryMetric(int expected, Map responseAsMap, String metric) throws IOException {
+    private static void assertAllQueryMetric(int expected, Map responseAsMap, String metric) throws IOException {
         assertQueryMetric(expected, responseAsMap, "_all", metric);
     }
 
diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java
index 60264874472ef..8bd1e2c5d0581 100644
--- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java
+++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java
@@ -151,7 +151,7 @@ private void execute(String uri, boolean debug, boolean binary, String keystoreL
         }
     }
 
-    private void checkConnection(CliSession cliSession, CliTerminal cliTerminal, ConnectionConfiguration con) throws UserException {
+    private static void checkConnection(CliSession cliSession, CliTerminal cliTerminal, ConnectionConfiguration con) throws UserException {
         try {
             cliSession.checkConnection();
         } catch (ClientException ex) {
diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java
index d24a0d22a4e30..3d6324bc44557 100644
--- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java
+++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/ConnectionBuilder.java
@@ -101,7 +101,7 @@ public ConnectionConfiguration buildConnection(String connectionStringArg, Strin
     }
 
     @SuppressForbidden(reason = "cli application shouldn't depend on ES")
-    private Path getKeystorePath(String keystoreLocation) {
+    private static Path getKeystorePath(String keystoreLocation) {
         return Paths.get(keystoreLocation);
     }
 
diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java
index b2f5e998baa4e..8efc920bdbc16 100644
--- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java
+++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java
@@ -60,7 +60,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession,
String l return true; } - private void handleText(CliTerminal terminal, String str) { + private static void handleText(CliTerminal terminal, String str) { terminal.print(str); } } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index 77e9012afa814..3c8f614145719 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -152,7 +152,7 @@ private ResponseWithWarnings con.request( (out) -> out.write(requestBytes), - this::readFrom, + HttpClient::readFrom, "POST", requestBodyContentType.mediaTypeWithoutParameters() // "application/cbor" or "application/json" ) @@ -199,7 +199,7 @@ private Response get(String path, CheckedFunction con.request(null, this::readFrom, "GET") + con -> con.request(null, HttpClient::readFrom, "GET") ) ).getResponseOrThrowException(); return fromContent(contentType(response.v1()), response.v2(), responseParser); @@ -216,7 +216,7 @@ private byte[] toContent(Request request) { } } - private Tuple>, byte[]> readFrom(InputStream inputStream, Function> headers) { + private static Tuple>, byte[]> readFrom(InputStream inputStream, Function> headers) { ByteArrayOutputStream out = new ByteArrayOutputStream(); try { Streams.copy(inputStream, out); @@ -227,7 +227,7 @@ private Tuple>, byte[]> readFrom(InputStream input } - private ContentType contentType(Function> headers) { + private static ContentType contentType(Function> headers) { List contentTypeHeaders = headers.apply("Content-Type"); String contentType = contentTypeHeaders == null || contentTypeHeaders.isEmpty() ? 
null : contentTypeHeaders.get(0); @@ -239,7 +239,7 @@ private ContentType contentType(Function> headers) { } } - private Response fromContent( + private static Response fromContent( ContentType type, byte[] bytesReference, CheckedFunction responseParser diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java index a632bb6b3698e..b41c5864a28b0 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/JreHttpUrlConnection.java @@ -187,7 +187,7 @@ public ResponseOrException request( } } - private Function> getHeaderFields(URLConnection con) { + private static Function> getHeaderFields(URLConnection con) { return header -> { List values = new LinkedList<>(); for (Map.Entry> entry : con.getHeaderFields().entrySet()) { @@ -199,7 +199,7 @@ private Function> getHeaderFields(URLConnection con) { }; } - private boolean shouldParseBody(int responseCode) { + private static boolean shouldParseBody(int responseCode) { return responseCode == 200 || responseCode == 201 || responseCode == 202; } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ProxyConfig.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ProxyConfig.java index f4aa8099bf60b..36628e5d59d90 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ProxyConfig.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/ProxyConfig.java @@ -40,7 +40,7 @@ class ProxyConfig { } @SuppressForbidden(reason = "create the actual proxy") - private Proxy createProxy(Proxy.Type type, Object[] address) { + private static Proxy createProxy(Proxy.Type type, Object[] address) { return new Proxy(type, new InetSocketAddress((String) address[0], (int) address[1])); } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/SslConfig.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/SslConfig.java index e1645b3ed5833..72c891bf52a43 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/SslConfig.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/SslConfig.java @@ -130,7 +130,7 @@ private KeyManager[] loadKeyManagers() throws GeneralSecurityException, IOExcept return kmFactory.getKeyManagers(); } - private KeyStore loadKeyStore(String source, char[] pass, String keyStoreType) throws GeneralSecurityException, IOException { + private static KeyStore loadKeyStore(String source, char[] pass, String keyStoreType) throws GeneralSecurityException, IOException { KeyStore keyStore = KeyStore.getInstance(keyStoreType); Path path = Paths.get(source); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ConstructingObjectParser.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ConstructingObjectParser.java index 77b5e46cebf31..d6ef897ed58b0 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ConstructingObjectParser.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ConstructingObjectParser.java @@ -104,7 +104,7 @@ private 
BiConsumer queueingConsumer(BiConsumer consumer }; } - private boolean isConstructorArg(BiConsumer consumer) { + private static boolean isConstructorArg(BiConsumer consumer) { return consumer == REQUIRED_CONSTRUCTOR_ARG_MARKER || consumer == OPTIONAL_CONSTRUCTOR_ARG_MARKER; } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ObjectParser.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ObjectParser.java index 2b5d94d9c51b0..b3d21193b5915 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ObjectParser.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ObjectParser.java @@ -174,7 +174,7 @@ Value parse(JsonParser parser, Value value, Context context) throws IOException return value; } - private void maybeMarkRequiredField(String currentFieldName, List requiredFields) { + private static void maybeMarkRequiredField(String currentFieldName, List requiredFields) { Iterator iter = requiredFields.iterator(); while (iter.hasNext()) { String[] requiredFieldNames = iter.next(); @@ -232,7 +232,7 @@ private void parseValue(JsonParser parser, FieldParser fieldParser, String curre } } - private void throwMustEndOn(JsonParser parser, String currentFieldName, JsonToken token) { + private static void throwMustEndOn(JsonParser parser, String currentFieldName, JsonToken token) { throw new ParseException(location(parser), "parser for [" + currentFieldName + "] did not end on " + token); } @@ -258,7 +258,7 @@ private void throwFailedToParse(JsonParser parser, String currentFieldName, Exce throw new ParseException(location(parser), "[" + name + "] failed to parse field [" + currentFieldName + "]", ex); } - private void throwMissingRequiredFields(List requiredFields) { + private static void throwMissingRequiredFields(List requiredFields) { final StringBuilder message = new StringBuilder(); for (String[] fields : requiredFields) { message.append("Required one of fields ").append(Arrays.toString(fields)).append(", but none were specified. "); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java index cb877c7a285ff..c005da5ff0789 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/content/ParsedMediaType.java @@ -118,7 +118,7 @@ public String responseContentTypeHeader() { return mediaTypeWithoutParameters() + formatParameters(parameters); } - private String formatParameters(Map params) { + private static String formatParameters(Map params) { String joined = params.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining(";")); return joined.isEmpty() ? 
"" : ";" + joined; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index d7b95f80298e2..c0bf3efa74f13 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -202,7 +202,7 @@ protected LogicalPlan rule(With plan) { return substituteCTE(plan.child(), plan.subQueries()); } - private LogicalPlan substituteCTE(LogicalPlan p, Map subQueries) { + private static LogicalPlan substituteCTE(LogicalPlan p, Map subQueries) { if (p instanceof UnresolvedRelation ur) { SubQueryAlias subQueryAlias = subQueries.get(ur.table().index()); if (subQueryAlias != null) { @@ -344,7 +344,7 @@ else if (plan instanceof OrderBy o) { }); } - private List expandProjections(List projections, LogicalPlan child) { + private static List expandProjections(List projections, LogicalPlan child) { List result = new ArrayList<>(); List output = child.output(); @@ -366,7 +366,7 @@ private List expandProjections(List return result; } - private List expandStar(UnresolvedStar us, List output) { + private static List expandStar(UnresolvedStar us, List output) { List expanded = new ArrayList<>(); // a qualifier is specified - since this is a star, it should be a CompoundDataType @@ -532,7 +532,7 @@ protected LogicalPlan doRule(LogicalPlan plan) { return plan; } - private Integer findOrdinal(Expression expression) { + private static Integer findOrdinal(Expression expression) { if (expression.foldable()) { if (expression.dataType().isInteger()) { Object v = Foldables.valueOf(expression); @@ -789,7 +789,7 @@ protected LogicalPlan rule(LogicalPlan plan) { return plan; } - private Expression replaceAliases(Expression condition, List named) { + private static Expression replaceAliases(Expression condition, List named) { List aliases = new ArrayList<>(); named.forEach(n -> { if (n instanceof Alias) { @@ -878,11 +878,11 @@ protected LogicalPlan doRule(LogicalPlan plan) { return plan; } - private boolean hasUnresolvedAliases(List expressions) { + private static boolean hasUnresolvedAliases(List expressions) { return expressions != null && Expressions.anyMatch(expressions, UnresolvedAlias.class::isInstance); } - private List assignAliases(List exprs) { + private static List assignAliases(List exprs) { List newExpr = new ArrayList<>(exprs.size()); for (NamedExpression expr : exprs) { NamedExpression transformed = (NamedExpression) expr.transformUp(UnresolvedAlias.class, ua -> { @@ -1015,7 +1015,7 @@ protected LogicalPlan rule(Filter f, AnalyzerContext context) { return f; } - private Set findMissingAggregate(Aggregate target, Expression from) { + private static Set findMissingAggregate(Aggregate target, Expression from) { Set missing = new LinkedHashSet<>(); for (Expression filterAgg : from.collect(Functions::isAggregate)) { @@ -1106,10 +1106,10 @@ protected boolean skipResolved() { @Override protected LogicalPlan rule(LogicalPlan plan) { - return plan.transformExpressionsDown(this::implicitCast); + return plan.transformExpressionsDown(ImplicitCasting::implicitCast); } - private Expression implicitCast(Expression e) { + private static Expression implicitCast(Expression e) { if (e.childrenResolved() == false) { return e; } @@ -1209,7 +1209,7 @@ protected LogicalPlan rule(LogicalPlan plan) { return plan.transformExpressionsOnly(Alias.class, a -> 
a.child()); } - private List cleanChildrenAliases(List args) { + private static List cleanChildrenAliases(List args) { List cleaned = new ArrayList<>(args.size()); for (NamedExpression ne : args) { cleaned.add((NamedExpression) trimNonTopLevelAliases(ne)); @@ -1217,7 +1217,7 @@ private List cleanChildrenAliases(List cleanAllAliases(List args) { + private static List cleanAllAliases(List args) { List cleaned = new ArrayList<>(args.size()); for (Expression e : args) { cleaned.add(trimAliases(e)); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java index 72af249f17b6b..c7826919c5999 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java @@ -274,7 +274,7 @@ Collection verify(LogicalPlan plan, SqlVersion version) { return failures; } - private void checkNestedAggregation(LogicalPlan p, Set localFailures, AttributeMap attributeRefs) { + private static void checkNestedAggregation(LogicalPlan p, Set localFailures, AttributeMap attributeRefs) { if (p instanceof Aggregate) { ((Aggregate) p).child().forEachDown(Aggregate.class, a -> { localFailures.add(fail(a, "Nested aggregations in sub-selects are not supported.")); @@ -282,7 +282,7 @@ private void checkNestedAggregation(LogicalPlan p, Set localFailures, A } } - private void checkFullTextSearchInSelect(LogicalPlan plan, Set localFailures) { + private static void checkFullTextSearchInSelect(LogicalPlan plan, Set localFailures) { plan.forEachUp(Project.class, p -> { for (NamedExpression ne : p.projections()) { ne.forEachUp( diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java index fae22e1ba60dc..fb78aa950f61d 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/PivotRowSet.java @@ -117,7 +117,7 @@ else if (hasNull(currentRow) == false || data.isEmpty()) { lastAfterKey = currentRowGroupKey; } - private boolean hasNull(Object[] currentRow) { + private static boolean hasNull(Object[] currentRow) { for (Object object : currentRow) { if (object == null) { return true; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index 1fa2ca845b218..d3ab0f797ae2f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -304,7 +304,7 @@ protected LogicalPlan rule(Aggregate agg) { static class PruneOrderByNestedFields extends OptimizerRule { - private void findNested(Expression exp, AttributeMap functions, Consumer onFind) { + private static void findNested(Expression exp, AttributeMap functions, Consumer onFind) { exp.forEachUp(e -> { if (e instanceof ReferenceAttribute) { Function f = functions.resolve(e); @@ -549,7 +549,10 @@ protected LogicalPlan rule(UnaryPlan plan) { // normally only the upper projections should survive but since the lower list might have aliases definitions // that might be reused by the upper one, these need 
to be replaced. // for example an alias defined in the lower list might be referred in the upper - without replacing it the alias becomes invalid - private List combineProjections(List upper, List lower) { + private static List combineProjections( + List upper, + List lower + ) { // TODO: this need rewriting when moving functions of NamedExpression @@ -636,7 +639,7 @@ protected LogicalPlan rule(LogicalPlan plan) { } - private boolean canPropagateFoldable(LogicalPlan p) { + private static boolean canPropagateFoldable(LogicalPlan p) { return p instanceof Project || p instanceof Filter || p instanceof SubQueryAlias @@ -827,7 +830,7 @@ protected LogicalPlan rule(UnaryPlan plan) { return plan; } - private LocalRelation unfilteredLocalRelation(LogicalPlan plan) { + private static LocalRelation unfilteredLocalRelation(LogicalPlan plan) { List filterOrLeaves = plan.collectFirstChildren(p -> p instanceof Filter || p instanceof LeafPlan); if (filterOrLeaves.size() == 1) { @@ -1172,7 +1175,7 @@ protected LogicalPlan rule(UnaryPlan plan) { return plan; } - private List extractLiterals(List named) { + private static List extractLiterals(List named) { List values = new ArrayList<>(); for (NamedExpression n : named) { if (n instanceof Alias a) { @@ -1198,14 +1201,16 @@ static class SkipQueryForLiteralAggregations extends OptimizerRule { @Override protected LogicalPlan rule(Aggregate plan) { - if (plan.groupings().isEmpty() && plan.child() instanceof EsRelation && plan.aggregates().stream().allMatch(this::foldable)) { + if (plan.groupings().isEmpty() + && plan.child() instanceof EsRelation + && plan.aggregates().stream().allMatch(SkipQueryForLiteralAggregations::foldable)) { return plan.replaceChild(new LocalRelation(plan.source(), new SingletonExecutable())); } return plan; } - private boolean foldable(Expression e) { + private static boolean foldable(Expression e) { if (e instanceof Alias) { e = ((Alias) e).child(); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java index 0b1d346bf051d..c170689e1fa90 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java @@ -249,7 +249,7 @@ public LogicalPlan visitRelation(RelationContext ctx) { return result; } - private Join doJoin(JoinRelationContext ctx) { + private static Join doJoin(JoinRelationContext ctx) { JoinCriteriaContext criteria = ctx.joinCriteria(); if (criteria != null) { @@ -286,7 +286,7 @@ public LogicalPlan visitTableName(TableNameContext ctx) { return new UnresolvedRelation(source(ctx), tableIdentifier, alias, includeFrozen); } - private Limit limit(LogicalPlan plan, Source source, Token limit) { + private static Limit limit(LogicalPlan plan, Source source, Token limit) { return new Limit(source, new Literal(source, Integer.parseInt(limit.getText()), DataTypes.INTEGER), plan); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java index 2f94b5e869a33..d1233a63c4d57 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/Mapper.java @@ -116,7 +116,7 @@ protected PhysicalPlan map(Join j) { return join(j); } - private PhysicalPlan 
join(Join join) { + private static PhysicalPlan join(Join join) { // TODO: pick up on nested/parent-child docs // 2. Hash? // 3. Cartesian diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java index 1a90ee22e29db..8167305ef0cee 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java @@ -90,7 +90,7 @@ protected void resolveIndicesAndGetCheckpoint(Task task, Request request, Action new AsyncGetCheckpointsFromNodesAction(state, task, nodesAndShards, new OriginalIndices(request), listener).start(); } - private Map> resolveIndicesToPrimaryShards(ClusterState state, String[] concreteIndices) { + private static Map> resolveIndicesToPrimaryShards(ClusterState state, String[] concreteIndices) { if (concreteIndices.length == 0) { return Collections.emptyMap(); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index 375c6e063ac6d..13423de7d3a85 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -384,7 +384,7 @@ public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTa // checking for `isNotStopped` as the state COULD be marked as failed for any number of reasons // But if it is in a failed state, _stats will show as much and give good reason to the user. 
// If it is not able to be assigned to a node all together, we should just close the task completely - private boolean isNotStopped(PersistentTasksCustomMetadata.PersistentTask task) { + private static boolean isNotStopped(PersistentTasksCustomMetadata.PersistentTask task) { TransformState state = (TransformState) task.getState(); return state != null && state.getTaskState().equals(TransformTaskState.STOPPED) == false; } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java index 30577a41998af..107c3c2b5bd65 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java @@ -898,7 +898,7 @@ private void parseCheckpointsLenientlyFromSource( } } - private QueryBuilder buildQueryFromTokenizedIds(String[] idTokens, String resourceName) { + private static QueryBuilder buildQueryFromTokenizedIds(String[] idTokens, String resourceName) { BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(TransformField.INDEX_DOC_TYPE.getPreferredName(), resourceName)); if (Strings.isAllOrWildcard(idTokens) == false) { diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestCatTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestCatTransformAction.java index a7ca920b5a970..b68afa8db75fb 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestCatTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestCatTransformAction.java @@ -211,7 +211,7 @@ private static Table getTableWithHeader() { .endHeaders(); } - private Table buildTable(GetTransformAction.Response response, GetTransformStatsAction.Response statsResponse) { + private static Table buildTable(GetTransformAction.Response response, GetTransformStatsAction.Response statsResponse) { Table table = getTableWithHeader(); Map statsById = statsResponse.getTransformsStats() .stream() diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index 6e35bcbd18a5d..aa956e47ad49a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -364,7 +364,7 @@ private static IndexerState currentIndexerState(TransformState previousState) { }; } - private void markAsFailed(TransformTask task, String reason) { + private static void markAsFailed(TransformTask task, String reason) { CountDownLatch latch = new CountDownLatch(1); task.fail( diff --git a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java index 0feafd8b8af02..0c4ff1780ae1e 100644 --- 
a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java +++ b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java @@ -236,7 +236,7 @@ private LineString buildMercatorLine(Line line) { return geomFactory.createLineString(buildMercatorCoordinates(line)); } - private Coordinate[] buildMercatorCoordinates(Line line) { + private static Coordinate[] buildMercatorCoordinates(Line line) { final Coordinate[] coordinates = new Coordinate[line.length()]; for (int i = 0; i < line.length(); i++) { final double x = SphericalMercatorUtils.lonToSphericalMercator(line.getX(i)); @@ -282,7 +282,7 @@ private org.locationtech.jts.geom.MultiPoint buildMercatorMultiPoint(MultiPoint return geomFactory.createMultiPoint(points); } - private org.locationtech.jts.geom.Geometry clipGeometry( + private static org.locationtech.jts.geom.Geometry clipGeometry( org.locationtech.jts.geom.Geometry tile, org.locationtech.jts.geom.Geometry geometry ) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 0c8690cc62e5f..e5fd1fcbf2035 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -857,7 +857,7 @@ public String getFeatureDescription() { return "Manages Watch definitions and state"; } - private Settings getWatchesIndexSettings() { + private static Settings getWatchesIndexSettings() { return Settings.builder() .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) @@ -867,7 +867,7 @@ private Settings getWatchesIndexSettings() { .build(); } - private XContentBuilder getWatchesIndexMappings() { + private static XContentBuilder getWatchesIndexMappings() { try { final XContentBuilder builder = jsonBuilder(); @@ -949,7 +949,7 @@ private XContentBuilder getWatchesIndexMappings() { } } - private Settings getTriggeredWatchesIndexSettings() { + private static Settings getTriggeredWatchesIndexSettings() { return Settings.builder() .put("index.number_of_shards", 1) .put("index.auto_expand_replicas", "0-1") @@ -959,7 +959,7 @@ private Settings getTriggeredWatchesIndexSettings() { .build(); } - private XContentBuilder getTriggeredWatchesIndexMappings() { + private static XContentBuilder getTriggeredWatchesIndexMappings() { try { final XContentBuilder builder = jsonBuilder(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java index ca91d51a2d129..7e16a0353f2cd 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -328,7 +328,10 @@ private boolean hasShardAllocationIdChanged(String watchIndex, ClusterState stat * - then store the size of the allocation ids and the index position * data.put(ShardId(".watch", 0), new Tuple(1, 4)) */ - Map getLocalShardAllocationIds(List localShards, IndexRoutingTable routingTable) { + static Map getLocalShardAllocationIds( + List localShards, + IndexRoutingTable routingTable + ) { Map data = Maps.newMapWithExpectedSize(localShards.size()); for (ShardRouting shardRouting : localShards) { diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 73c3a4c093fdf..571e8912b43b2 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -191,7 +191,7 @@ private void pauseExecution(String reason) { /** * check if watcher has been stopped manually via the stop API */ - private boolean isWatcherStoppedManually(ClusterState state) { + private static boolean isWatcherStoppedManually(ClusterState state) { WatcherMetadata watcherMetadata = state.getMetadata().custom(WatcherMetadata.TYPE); return watcherMetadata != null && watcherMetadata.manuallyStopped(); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 3a3638a88edcc..0472722bd80a2 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -419,7 +419,7 @@ void refreshWatches(IndexMetadata indexMetadata) { * @param index The index of the local shard * @return true if the we should parse the watch on this node, false otherwise */ - private boolean parseWatchOnThisNode(String id, int totalShardCount, int index) { + private static boolean parseWatchOnThisNode(String id, int totalShardCount, int index) { int hash = Murmur3HashFunction.hash(id); int shardIndex = Math.floorMod(hash, totalShardCount); return shardIndex == index; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java index 0a67129495cb5..b385298c09062 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java @@ -226,7 +226,14 @@ private Map addTimestampToDocument(Map data, Zon /** * Extracts the specified field out of data map, or alternative falls back to the action value */ - private String getField(String actionId, String watchId, String name, Map data, String fieldName, String defaultValue) { + private static String getField( + String actionId, + String watchId, + String name, + Map data, + String fieldName, + String defaultValue + ) { Object obj; // map may be immutable - only try to remove if it's actually there if (data.containsKey(fieldName) && (obj = data.remove(fieldName)) != null) { @@ -255,7 +262,7 @@ private String getField(String actionId, String watchId, String name, Map mutableMap(Map data) { + private static Map mutableMap(Map data) { return data instanceof HashMap ? 
data : new HashMap<>(data); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java index e70e1ba349086..9a165112c41d1 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java @@ -334,7 +334,7 @@ static void setProxy(RequestConfig.Builder config, HttpRequest request, HttpProx * * @return An HTTP proxy instance, if no settings are configured this will be an HttpProxy.NO_PROXY instance */ - private HttpProxy getProxyFromSettings(Settings settings) { + private static HttpProxy getProxyFromSettings(Settings settings) { String proxyHost = HttpSettings.PROXY_HOST.get(settings); Scheme proxyScheme = HttpSettings.PROXY_SCHEME.exists(settings) ? Scheme.parse(HttpSettings.PROXY_SCHEME.get(settings)) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java index 7dd9ce8c1012f..4ef46374fba0f 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java @@ -200,7 +200,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params toX * Sanitize both internal (see {@link #sanitizeInternalHeaders(Map)} and * user-added sensitive headers that should not be shown. */ - private Map sanitizeHeaders(Map headers) { + private static Map sanitizeHeaders(Map headers) { String authorizationHeader = headers.containsKey("Authorization") ? "Authorization" : null; if (authorizationHeader == null) { authorizationHeader = headers.containsKey("authorization") ? "authorization" : null; @@ -217,7 +217,7 @@ private Map sanitizeHeaders(Map headers) { * Sanitize headers that the user may not have added, but were automatically * added by Elasticsearch. */ - private Map sanitizeInternalHeaders(Map headers) { + private static Map sanitizeInternalHeaders(Map headers) { // Redact the additional webhook password, if present. 
if (headers.containsKey(WebhookService.TOKEN_HEADER_NAME)) { Map sanitizedHeaders = new HashMap<>(headers); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java index 1d50a3980555e..ddf380652d482 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/text/TextTemplateEngine.java @@ -64,7 +64,7 @@ public String render(TextTemplate textTemplate, Map model) { return compiledTemplate.newInstance(mergedModel).execute(); } - private String trimContentType(TextTemplate textTemplate) { + private static String trimContentType(TextTemplate textTemplate) { String template = textTemplate.getTemplate(); if (template.startsWith("__") == false) { return template; // Doesn't even start with __ so can't have a content type @@ -83,7 +83,7 @@ private String trimContentType(TextTemplate textTemplate) { return template; } - private XContentType detectContentType(String content) { + private static XContentType detectContentType(String content) { if (content.startsWith("__")) { // There must be a __ "failed to execute watch [" + ctx.id().watchId() + "]", e); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java index bee560c33fb62..74c72e468e9b1 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStore.java @@ -94,7 +94,7 @@ public BulkResponse putAll(final List triggeredWatches) throws I * @return The bulk request for the triggered watches * @throws IOException If a triggered watch could not be parsed to JSON, this exception is thrown */ - private BulkRequest createBulkRequest(final List triggeredWatches) throws IOException { + private static BulkRequest createBulkRequest(final List triggeredWatches) throws IOException { BulkRequest request = new BulkRequest(); for (TriggeredWatch triggeredWatch : triggeredWatches) { IndexRequest indexRequest = new IndexRequest(TriggeredWatchStoreField.INDEX_NAME).id(triggeredWatch.id().value()); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java index eff93d3847b7c..d644a4cf48769 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java @@ -173,7 +173,7 @@ private void executeConnect(Transport transport, String user, String password) t } } - private void setContextClassLoader(final ClassLoader classLoader) { + private static void setContextClassLoader(final ClassLoader classLoader) { SecurityManager sm = System.getSecurityManager(); if (sm != null) { // unprivileged code such as scripts do not have SpecialPermission diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java 
b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java index 92cca7c717299..3afe2fa731860 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java @@ -268,7 +268,7 @@ static boolean recipientDomainsInAllowList(Email email, Set allowedDomai return domains.stream().allMatch(matchesAnyAllowedDomain); } - private EmailSent send(Email email, Authentication auth, Profile profile, Account account) throws MessagingException { + private static EmailSent send(Email email, Authentication auth, Profile profile, Account account) throws MessagingException { assert account != null; try { email = account.send(email, auth, profile); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java index 2bb25436e056b..929150e916d21 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParser.java @@ -80,7 +80,7 @@ public class ReportingAttachmentParser implements EmailAttachmentParser Setting.simpleString(key, Setting.Property.NodeScope, Setting.Property.Dynamic) ); - private static final ObjectParser PARSER = new ObjectParser<>("reporting_attachment"); + private static final ObjectParser PARSER = new ObjectParser<>("reporting_attachment"); private static final ObjectParser PAYLOAD_PARSER = new ObjectParser<>( "reporting_attachment_kibana_payload", true, @@ -98,8 +98,20 @@ public class ReportingAttachmentParser implements EmailAttachmentParser s.parseAuth(p), () -> null, ReportingAttachment.AUTH); - PARSER.declareObjectOrDefault(Builder::proxy, (p, s) -> s.parseProxy(p), () -> null, ReportingAttachment.PROXY); + PARSER.declareObjectOrDefault(Builder::auth, (p, s) -> { + try { + return BasicAuth.parse(p); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }, () -> null, ReportingAttachment.AUTH); + PARSER.declareObjectOrDefault(Builder::proxy, (p, s) -> { + try { + return HttpProxy.parse(p); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }, () -> null, ReportingAttachment.PROXY); PAYLOAD_PARSER.declareString(KibanaReportingPayload::setPath, new ParseField("path")); } @@ -138,7 +150,7 @@ public ReportingAttachmentParser( this.templateEngine = templateEngine; this.logger = LogManager.getLogger(getClass()); clusterSettings.addSettingsUpdateConsumer(REPORT_WARNING_ENABLED_SETTING, this::setWarningEnabled); - clusterSettings.addAffixUpdateConsumer(REPORT_WARNING_TEXT, this::addWarningText, this::warningValidator); + clusterSettings.addAffixUpdateConsumer(REPORT_WARNING_TEXT, this::addWarningText, ReportingAttachmentParser::warningValidator); } void setWarningEnabled(boolean warningEnabled) { @@ -149,8 +161,8 @@ void addWarningText(String name, String value) { customWarnings.put(name, value); } - void warningValidator(String name, String value) { - if (WARNINGS.keySet().contains(name) == false) { + static void warningValidator(String name, String value) { + if (WARNINGS.containsKey(name) == false) { throw new IllegalArgumentException( 
format( "Warning [%s] is not supported. Only the following warnings are supported [%s]", @@ -169,7 +181,7 @@ public String type() { @Override public ReportingAttachment parse(String id, XContentParser parser) throws IOException { Builder builder = new Builder(id); - PARSER.parse(parser, builder, new AuthParseContext()); + PARSER.parse(parser, builder, null); return builder.build(); } @@ -286,7 +298,7 @@ public Attachment toAttachment(WatchExecutionContext context, Payload payload, R ); } - private void sleep(long sleepMillis, WatchExecutionContext context, ReportingAttachment attachment) { + private static void sleep(long sleepMillis, WatchExecutionContext context, ReportingAttachment attachment) { try { Thread.sleep(sleepMillis); } catch (InterruptedException e) { @@ -344,7 +356,7 @@ private HttpResponse requestReportGeneration(String watchId, String attachmentId /** * Extract the id from JSON payload, so we know which ID to poll for */ - private String extractIdFromJson(String watchId, String attachmentId, BytesReference body) throws IOException { + private static String extractIdFromJson(String watchId, String attachmentId, BytesReference body) throws IOException { // EMPTY is safe here becaus we never call namedObject try ( InputStream stream = body.streamInput(); @@ -369,29 +381,6 @@ private String extractIdFromJson(String watchId, String attachmentId, BytesRefer } } - /** - * A helper class to parse HTTP auth and proxy structures, which is read by an old school pull parser, that is handed over in the ctor. - * See the static parser definition at the top - */ - private static class AuthParseContext { - - BasicAuth parseAuth(XContentParser parser) { - try { - return BasicAuth.parse(parser); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - HttpProxy parseProxy(XContentParser parser) { - try { - return HttpProxy.parse(parser); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - } - /** * Helper class to extract the URL path of the dashboard from the response after a report was triggered * diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java index be008212a3793..e7b05067a5111 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/pagerduty/IncidentEvent.java @@ -167,7 +167,7 @@ XContentBuilder buildAPIXContent(XContentBuilder builder, Params params, String /** * Turns the V1 API contexts into 2 distinct lists, images and links. The V2 API has separated these out into 2 top level fields. 
*/ - private void toXContentV2Contexts(XContentBuilder builder, ToXContent.Params params, IncidentEventContext[] contexts) + private static void toXContentV2Contexts(XContentBuilder builder, ToXContent.Params params, IncidentEventContext[] contexts) throws IOException { // contexts can be either links or images, and the v2 api needs them separate Map> groups = Arrays.stream(contexts) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportActivateWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportActivateWatchAction.java index a821e75569c9e..b5a74aecea5b5 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportActivateWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportActivateWatchAction.java @@ -118,7 +118,7 @@ protected void doExecute(ActivateWatchRequest request, ActionListener allocationIds = listener.getLocalShardAllocationIds( + Map allocationIds = WatcherIndexingListener.getLocalShardAllocationIds( asList(shardRouting), indexRoutingTable ); @@ -412,7 +412,7 @@ public void testCheckAllocationIdsWithoutShards() throws Exception { ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "other", true, STARTED); IndexRoutingTable indexRoutingTable = IndexRoutingTable.builder(index).addShard(shardRouting).build(); - Map allocationIds = listener.getLocalShardAllocationIds( + Map allocationIds = WatcherIndexingListener.getLocalShardAllocationIds( Collections.emptyList(), indexRoutingTable ); @@ -436,7 +436,10 @@ public void testCheckAllocationIdsWithSeveralShards() { .addShard(TestShardRouting.newShardRouting(secondShardId, "node2", false, STARTED)) .build(); - Map allocationIds = listener.getLocalShardAllocationIds(localShards, indexRoutingTable); + Map allocationIds = WatcherIndexingListener.getLocalShardAllocationIds( + localShards, + indexRoutingTable + ); assertThat(allocationIds.size(), is(2)); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java index a8eee3d0f9f1d..ad5609ac34624 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/attachment/ReportingAttachmentParserTests.java @@ -673,7 +673,7 @@ public void testWarningValidation() { String keyName = randomAlphaOfLength(5) + "notavalidsettingname"; IllegalArgumentException expectedException = expectThrows( IllegalArgumentException.class, - () -> reportingAttachmentParser.warningValidator(keyName, randomAlphaOfLength(10)) + () -> ReportingAttachmentParser.warningValidator(keyName, randomAlphaOfLength(10)) ); assertThat(expectedException.getMessage(), containsString(keyName)); assertThat(expectedException.getMessage(), containsString("is not supported")); diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java index 5c9ddc6ea1f0f..54adb26f7ba69 100644 --- 
a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java @@ -810,7 +810,7 @@ public Query termQuery(Object value, SearchExecutionContext context) { return wildcardQuery(escapeWildcardSyntax(searchTerm), MultiTermQuery.CONSTANT_SCORE_REWRITE, false, context); } - private String escapeWildcardSyntax(String term) { + private static String escapeWildcardSyntax(String term) { StringBuilder result = new StringBuilder(); for (int i = 0; i < term.length();) { final int c = term.codePointAt(i); diff --git a/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java b/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java index d1d69a4b0fd94..c1126df228cfe 100644 --- a/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java +++ b/x-pack/plugin/write-load-forecaster/src/main/java/org/elasticsearch/xpack/writeloadforecaster/LicensedWriteLoadForecaster.java @@ -97,7 +97,7 @@ public Metadata.Builder withWriteLoadForecastForWriteIndex(String dataStreamName return metadata; } - private void clearPreviousForecast(DataStream dataStream, Metadata.Builder metadata) { + private static void clearPreviousForecast(DataStream dataStream, Metadata.Builder metadata) { if (dataStream.getIndices().size() > 1) { final Index previousWriteIndex = dataStream.getIndices().get(dataStream.getIndices().size() - 2); final IndexMetadata previousWriteIndexMetadata = metadata.getSafe(previousWriteIndex); diff --git a/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java b/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java index 34863db4d4bdb..6bd5c791765ec 100644 --- a/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java +++ b/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java @@ -261,7 +261,7 @@ private boolean modifyCreateIndex(ApiCallSection createIndex) { * runtime fields that load from source. 
      * @return true if this mapping supports runtime fields, false otherwise
      */
-    protected final boolean runtimeifyMappingProperties(Map properties, Map runtimeFields) {
+    protected static boolean runtimeifyMappingProperties(Map properties, Map runtimeFields) {
         for (Map.Entry property : properties.entrySet()) {
             if (false == property.getValue() instanceof Map) {
                 continue;

From 17659f8a4840eb39d028a54f545a06f72cc803c5 Mon Sep 17 00:00:00 2001
From: Mark Vieira
Date: Fri, 6 Oct 2023 16:07:16 -0700
Subject: [PATCH 048/176] Add pull request check for validating changelogs (#100449)

---
 .../pipelines/pull-request/validate-changelogs.yml         | 9 +++++++++
 ...ic+elasticsearch+pull-request+validate-changelogs.yml   | 5 +++++
 2 files changed, 14 insertions(+)
 create mode 100644 .buildkite/pipelines/pull-request/validate-changelogs.yml
 create mode 100644 .ci/jobs.t/elastic+elasticsearch+pull-request+validate-changelogs.yml

diff --git a/.buildkite/pipelines/pull-request/validate-changelogs.yml b/.buildkite/pipelines/pull-request/validate-changelogs.yml
new file mode 100644
index 0000000000000..9451d321a9b39
--- /dev/null
+++ b/.buildkite/pipelines/pull-request/validate-changelogs.yml
@@ -0,0 +1,9 @@
+steps:
+  - label: validate-changelogs
+    command: .ci/scripts/run-gradle.sh -Dignore.tests.seed validateChangelogs
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: custom-32-98304
+      buildDirectory: /dev/shm/bk
diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+validate-changelogs.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+validate-changelogs.yml
new file mode 100644
index 0000000000000..e0152bf41e885
--- /dev/null
+++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+validate-changelogs.yml
@@ -0,0 +1,5 @@
+---
+jjbb-template: pull-request-gradle-unix.yml
+vars:
+  - pr-job: "validate-changelogs"
+  - gradle-args: "-Dignore.tests.seed validateChangelogs"

From 8c0ae384724931f440eaf82c0619468bbf064492 Mon Sep 17 00:00:00 2001
From: Costin Leau
Date: Fri, 6 Oct 2023 20:06:31 -0700
Subject: [PATCH 049/176] ESQL: Add order to CSV test

---
 .../esql/qa/testFixtures/src/main/resources/rename.csv-spec | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec
index 5e5c70e3cbba7..96429874338e8 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec
@@ -94,12 +94,12 @@ x:integer | y:integer | x2:integer | y2:integer
 ;
 
 duplicateProjectEval
-from employees | eval y = languages, x = languages | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3;
+from employees | eval y = languages, x = languages | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3 | sort x;
 
 x:integer | y:integer | x2:integer | y2:integer
 2 | 2 | 3 | 4
-5 | 5 | 6 | 7
 4 | 4 | 5 | 6
+5 | 5 | 6 | 7
 ;

From 06e71272834d0598afe25680ff06eb6252af761b Mon Sep 17 00:00:00 2001
From: Craig Rodrigues
Date: Sat, 7 Oct 2023 05:06:54 -0700
Subject: [PATCH 050/176] Switch fleet's built-in ILM policies to use .actions.rollover.max_primary_shard_size (#99984)

Fixes #99983

Signed-off-by: Craig Rodrigues
---
 docs/changelog/99984.yaml                                    | 6 ++++++
 .../main/resources/fleet-actions-results-ilm-policy.json     | 4 ++--
 .../main/resources/fleet-file-fromhost-data-ilm-policy.json  | 4 ++--
 .../main/resources/fleet-file-fromhost-meta-ilm-policy.json  | 4 ++--
 .../main/resources/fleet-file-tohost-data-ilm-policy.json    | 4 ++--
 .../main/resources/fleet-file-tohost-meta-ilm-policy.json    | 4 ++--
 6 files changed, 16 insertions(+), 10 deletions(-)
 create mode 100644 docs/changelog/99984.yaml

diff --git a/docs/changelog/99984.yaml b/docs/changelog/99984.yaml
new file mode 100644
index 0000000000000..254845591941d
--- /dev/null
+++ b/docs/changelog/99984.yaml
@@ -0,0 +1,6 @@
+pr: 99984
+summary: Switch fleet's built-in ILM policies to use .actions.rollover.max_primary_shard_size
+area: ILM+SLM
+type: enhancement
+issues:
+ - 99983
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-actions-results-ilm-policy.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-actions-results-ilm-policy.json
index 176c7e5e70255..264498301e3dc 100644
--- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-actions-results-ilm-policy.json
+++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-actions-results-ilm-policy.json
@@ -4,7 +4,7 @@
       "min_age": "0ms",
       "actions": {
         "rollover": {
-          "max_size": "300gb",
+          "max_primary_shard_size": "300gb",
           "max_age": "30d"
         }
       }
@@ -22,4 +22,4 @@
     "description": "default policy for fleet action results indices",
     "managed": true
   }
-}
+}
\ No newline at end of file
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-data-ilm-policy.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-data-ilm-policy.json
index 7ea1aff57d7e7..a4c5dc272f7f9 100644
--- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-data-ilm-policy.json
+++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-data-ilm-policy.json
@@ -4,7 +4,7 @@
       "min_age": "0ms",
       "actions": {
         "rollover": {
-          "max_size": "10gb",
+          "max_primary_shard_size": "10gb",
           "max_age": "7d"
         }
       }
@@ -22,4 +22,4 @@
     "description": "policy for fleet uploaded files",
     "managed": true
   }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-meta-ilm-policy.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-meta-ilm-policy.json
index 98722614ebc23..c67b3a23cd473 100644
--- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-meta-ilm-policy.json
+++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-fromhost-meta-ilm-policy.json
@@ -4,7 +4,7 @@
       "min_age": "0ms",
       "actions": {
         "rollover": {
-          "max_size": "10gb",
+          "max_primary_shard_size": "10gb",
           "max_age": "30d"
         }
       }
@@ -22,4 +22,4 @@
     "description": "policy for fleet uploaded file metadata",
     "managed": true
   }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-data-ilm-policy.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-data-ilm-policy.json
index 89bd092dc2370..226dcf1771c83 100644
--- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-data-ilm-policy.json
+++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-data-ilm-policy.json
@@ -4,7 +4,7 @@
       "min_age": "0ms",
       "actions": {
         "rollover": {
-          "max_size": "10gb",
+          "max_primary_shard_size": "10gb",
           "max_age": "14d"
         }
       }
@@ -22,4 +22,4 @@
     "description": "policy for fleet deliverable files",
     "managed": true
   }
-}
+}
\ No newline at end of file
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-meta-ilm-policy.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-meta-ilm-policy.json
a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-meta-ilm-policy.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-meta-ilm-policy.json index 976572fa35247..5d804255a578f 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-meta-ilm-policy.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-file-tohost-meta-ilm-policy.json @@ -4,7 +4,7 @@ "min_age": "0ms", "actions": { "rollover": { - "max_size": "10gb", + "max_primary_shard_size": "10gb", "max_age": "30d" } } @@ -22,4 +22,4 @@ "description": "policy for fleet deliverable file metadata", "managed": true } -} +} \ No newline at end of file From 09bf30ae9a4085bf775d5d7c3ba0344e26bbc11c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Sat, 7 Oct 2023 12:08:03 -0400 Subject: [PATCH 051/176] ESQL: Heap attack tests for multivalue fields (#100418) Adds four tests for multivalued fields with many values: * `STATS .. BY f0, f1, f2` on one doc where each `f` has 100 values. This finishes. * `STATS .. BY f0, f1, f2` on one doc where each `f` has 1000 values. This circuit breaks. * `KEEP f0, f1, f2, ... f99` on 100 docs where each `f` has 1000 values. This finishes. * `KEEP f0, f1, f2, ... f99` on 500 docs where each `f` has 1000 values. This *should* circuit break, but it crashes the node so it's skipped. --- .../esql/qa/single_node/HeapAttackIT.java | 80 +++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java index 9b44a7eaf8e2f..6cedba3e4ee28 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java @@ -300,6 +300,53 @@ private void fetchManyBigFields(int docs) throws IOException { assertMap(map, matchesMap().entry("columns", columns)); } + public void testAggMvLongs() throws IOException { + int fieldValues = 100; + initMvLongsIndex(1, 3, fieldValues); + Response response = aggMvLongs(3); + Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); + ListMatcher columns = matchesList().item(matchesMap().entry("name", "MAX(f00)").entry("type", "long")) + .item(matchesMap().entry("name", "f00").entry("type", "long")) + .item(matchesMap().entry("name", "f01").entry("type", "long")) + .item(matchesMap().entry("name", "f02").entry("type", "long")); + assertMap(map, matchesMap().entry("columns", columns)); + } + + public void testAggTooManyMvLongs() throws IOException { + initMvLongsIndex(1, 3, 1000); + assertCircuitBreaks(() -> aggMvLongs(3)); + } + + private Response aggMvLongs(int fields) throws IOException { + StringBuilder builder = new StringBuilder("{\"query\": \"FROM mv_longs | STATS MAX(f00) BY f00"); + for (int f = 1; f < fields; f++) { + builder.append(", f").append(String.format(Locale.ROOT, "%02d", f)); + } + return query(builder.append("\"}").toString(), "columns"); + } + + public void testFetchMvLongs() throws IOException { + int fields = 100; + initMvLongsIndex(100, fields, 1000); + Response response = fetchMvLongs(); + Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent,
EntityUtils.toString(response.getEntity()), false); + ListMatcher columns = matchesList(); + for (int f = 0; f < fields; f++) { + columns = columns.item(matchesMap().entry("name", String.format(Locale.ROOT, "f%02d", f)).entry("type", "long")); + } + assertMap(map, matchesMap().entry("columns", columns)); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") + public void testFetchTooManyMvLongs() throws IOException { + initMvLongsIndex(500, 100, 1000); + assertCircuitBreaks(() -> fetchMvLongs()); + } + + private Response fetchMvLongs() throws IOException { + return query("{\"query\": \"FROM mv_longs\"}", "columns"); + } + private void initManyLongs() throws IOException { logger.info("loading many documents with longs"); StringBuilder bulk = new StringBuilder(); @@ -371,6 +418,39 @@ private void initManyBigFieldsIndex(int docs) throws IOException { initIndex("manybigfields", bulk.toString()); } + private void initMvLongsIndex(int docs, int fields, int fieldValues) throws IOException { + logger.info("loading documents with many multivalued longs"); + int docsPerBulk = 100; + + StringBuilder bulk = new StringBuilder(); + for (int d = 0; d < docs; d++) { + bulk.append("{\"create\":{}}\n"); + for (int f = 0; f < fields; f++) { + if (f == 0) { + bulk.append('{'); + } else { + bulk.append(", "); + } + bulk.append('"').append("f").append(String.format(Locale.ROOT, "%02d", f)).append("\": "); + for (int fv = 0; fv < fieldValues; fv++) { + if (fv == 0) { + bulk.append('['); + } else { + bulk.append(", "); + } + bulk.append(f + fv); + } + bulk.append(']'); + } + bulk.append("}\n"); + if (d % docsPerBulk == docsPerBulk - 1 && d != docs - 1) { + bulk("mv_longs", bulk.toString()); + bulk.setLength(0); + } + } + initIndex("mv_longs", bulk.toString()); + } + private void bulk(String name, String bulk) throws IOException { Request request = new Request("POST", "/" + name + "/_bulk"); request.addParameter("filter_path", "errors"); From 7fdff6d4561423aaf9b932a27d35ee3fb12cd10b Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 7 Oct 2023 10:12:49 -0700 Subject: [PATCH 052/176] Release Block.Builder when finishing BuilderWrapper (#100455) We need to close the BuilderWrapper, which, in turn, will close the wrapped Block.Builder.
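In outline, the pattern the fix applies (a condensed sketch of the BlockUtils changes in the diff below, not the verbatim code; `type` stands in for the per-column element type the real methods resolve):

    Block[] blocks = new Block[size];
    boolean success = false;
    try {
        for (int i = 0; i < size; i++) {
            // BuilderWrapper now implements Releasable, so try-with-resources
            // closes it and, through it, the wrapped Block.Builder.
            try (BuilderWrapper wrapper = wrapperFor(blockFactory, type, blockSize)) {
                wrapper.accept(row.get(i));
                blocks[i] = wrapper.builder().build();
            }
        }
        success = true;
        return blocks;
    } finally {
        if (success == false) {
            // a failure part way through no longer leaks: release what was built
            Releasables.closeExpectNoException(blocks);
        }
    }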
--- .../compute/data/BlockUtils.java | 67 ++++++++++++------- .../xpack/esql/CsvTestUtils.java | 23 +++++-- 2 files changed, 60 insertions(+), 30 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java index 7a3eab07bc354..49f49a72cbcc5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Randomness; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import java.util.ArrayList; import java.util.Arrays; @@ -26,7 +28,7 @@ public final class BlockUtils { private BlockUtils() {} - public record BuilderWrapper(Block.Builder builder, Consumer append) { + public record BuilderWrapper(Block.Builder builder, Consumer append) implements Releasable { public BuilderWrapper(Block.Builder builder, Consumer append) { this.builder = builder; this.append = o -> { @@ -49,6 +51,11 @@ public BuilderWrapper(Block.Builder builder, Consumer append) { public void accept(Object object) { append.accept(object); } + + @Override + public void close() { + builder.close(); + } } public static Block[] fromArrayRow(BlockFactory blockFactory, Object... row) { @@ -66,25 +73,34 @@ public static Block[] fromListRow(BlockFactory blockFactory, List row, i var size = row.size(); Block[] blocks = new Block[size]; - for (int i = 0; i < size; i++) { - Object object = row.get(i); - if (object instanceof List listVal) { - BuilderWrapper wrapper = wrapperFor(blockFactory, fromJava(listVal.get(0).getClass()), blockSize); - wrapper.accept(listVal); - Random random = Randomness.get(); - if (isDeduplicated(listVal) && random.nextBoolean()) { - if (isAscending(listVal) && random.nextBoolean()) { - wrapper.builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); - } else { - wrapper.builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_UNORDERD); + boolean success = false; + try { + for (int i = 0; i < size; i++) { + Object object = row.get(i); + if (object instanceof List listVal) { + try (BuilderWrapper wrapper = wrapperFor(blockFactory, fromJava(listVal.get(0).getClass()), blockSize)) { + wrapper.accept(listVal); + Random random = Randomness.get(); + if (isDeduplicated(listVal) && random.nextBoolean()) { + if (isAscending(listVal) && random.nextBoolean()) { + wrapper.builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); + } else { + wrapper.builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_UNORDERD); + } + } + blocks[i] = wrapper.builder.build(); } + } else { + blocks[i] = constantBlock(blockFactory, object, blockSize); } - blocks[i] = wrapper.builder.build(); - } else { - blocks[i] = constantBlock(blockFactory, object, blockSize); + } + success = true; + return blocks; + } finally { + if (success == false) { + Releasables.closeExpectNoException(blocks); } } - return blocks; } /** @@ -126,16 +142,19 @@ public static Block[] fromList(BlockFactory blockFactory, List> lis } var wrappers = new BuilderWrapper[list.get(0).size()]; - - for (int i = 0; i < wrappers.length; i++) { - wrappers[i] = wrapperFor(blockFactory, fromJava(type(list, i)), size); - } - for (List values : list) { - for (int j = 0, vSize = values.size(); j < vSize; j++) { - wrappers[j].append.accept(values.get(j)); + try { + for (int i = 0; i 
< wrappers.length; i++) { + wrappers[i] = wrapperFor(blockFactory, fromJava(type(list, i)), size); + } + for (List values : list) { + for (int j = 0, vSize = values.size(); j < vSize; j++) { + wrappers[j].append.accept(values.get(j)); + } } + return Arrays.stream(wrappers).map(b -> b.builder.build()).toArray(Block[]::new); + } finally { + Releasables.closeExpectNoException(wrappers); } - return Arrays.stream(wrappers).map(b -> b.builder.build()).toArray(Block[]::new); } /** Returns a deep copy of the given block, using the blockFactory for creating the copy block. */ diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index 953fb65bd1eec..988d77a11beef 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -18,6 +18,8 @@ import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.Page; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Tuple; import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; @@ -61,7 +63,7 @@ public static boolean isEnabled(String testName) { public static Tuple> loadPageFromCsv(URL source) throws Exception { - record CsvColumn(String name, Type type, BuilderWrapper builderWrapper) { + record CsvColumn(String name, Type type, BuilderWrapper builderWrapper) implements Releasable { void append(String stringValue) { if (stringValue.contains(",")) {// multi-value field builderWrapper().builder().beginPositionEntry(); @@ -80,6 +82,11 @@ void append(String stringValue) { var converted = stringValue.length() == 0 ? 
null : type.convert(stringValue); builderWrapper().append().accept(converted); } + + @Override + public void close() { + builderWrapper.close(); + } } CsvColumn[] columns = null; @@ -156,11 +163,15 @@ void append(String stringValue) { } } var columnNames = new ArrayList(columns.length); - var blocks = Arrays.stream(columns) - .peek(b -> columnNames.add(b.name)) - .map(b -> b.builderWrapper.builder().build()) - .toArray(Block[]::new); - return new Tuple<>(new Page(blocks), columnNames); + try { + var blocks = Arrays.stream(columns) + .peek(b -> columnNames.add(b.name)) + .map(b -> b.builderWrapper.builder().build()) + .toArray(Block[]::new); + return new Tuple<>(new Page(blocks), columnNames); + } finally { + Releasables.closeExpectNoException(columns); + } } /** From cb1e901232b28c1fc042098c117080b07f3662d8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 8 Oct 2023 15:15:01 -0700 Subject: [PATCH 053/176] Ensure deterministic results in rename.DuplicateProjectEval (#100473) Without sort, the result can be out of order in a multi-node environment. --- .../esql/qa/testFixtures/src/main/resources/rename.csv-spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec index 96429874338e8..1e830486cc7c7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec @@ -94,7 +94,7 @@ x:integer | y:integer | x2:integer | y2:integer ; duplicateProjectEval -from employees | eval y = languages, x = languages | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3 | sort x; +from employees | sort emp_no | eval y = languages, x = languages | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3 | sort x; x:integer | y:integer | x2:integer | y2:integer 2 | 2 | 3 | 4 5 | 5 | 6 | 7 From a12195c25137d52a9ee394e0046ff76a12515031 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 8 Oct 2023 20:41:22 -0700 Subject: [PATCH 054/176] Reduce num docs in ValuesSourceReaderOperatorTests (#100469) The test failed because it took more than 30 seconds to extract fields for approximately 100,000 documents. With the randomized page size, we can make these tests more reliable and faster by reducing the number of documents.
Closes #100235 --- .../compute/lucene/ValuesSourceReaderOperatorTests.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index 4c0e33e5cfb82..3ce202c0e4608 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -202,7 +202,7 @@ public void testLoadAll() { DriverContext driverContext = driverContext(); loadSimpleAndAssert( driverContext, - CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100 * 1024))) + CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))) ); } @@ -212,7 +212,7 @@ public void testLoadAllInOnePage() { driverContext, List.of( CannedSourceOperator.mergePages( - CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100 * 1024))) + CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))) ) ) ); @@ -226,7 +226,7 @@ public void testEmpty() { public void testLoadAllInOnePageShuffled() { DriverContext driverContext = driverContext(); Page source = CannedSourceOperator.mergePages( - CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100 * 1024))) + CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))) ); List shuffleList = new ArrayList<>(); IntStream.range(0, source.getPositionCount()).forEach(i -> shuffleList.add(i)); @@ -384,7 +384,7 @@ public void testValuesSourceReaderOperatorWithNulls() throws IOException { NumericDocValuesField intField = new NumericDocValuesField(intFt.name(), 0); NumericDocValuesField longField = new NumericDocValuesField(longFt.name(), 0); NumericDocValuesField doubleField = new DoubleDocValuesField(doubleFt.name(), 0); - final int numDocs = 100_000; + final int numDocs = between(100, 5000); try (RandomIndexWriter w = new RandomIndexWriter(random(), directory)) { Document doc = new Document(); for (int i = 0; i < numDocs; i++) { From db4f92f0cb52e7ae57c5d7fceb20f347aff389a3 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 8 Oct 2023 20:42:12 -0700 Subject: [PATCH 055/176] Release discarded pages in mutateInstance (#100472) EsqlQueryResponseTests.testEqualsAndHashcode failed on CI because we did not release pages that were discarded by randomValueOtherThan while mutating the test instance.
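randomValueOtherThan regenerates candidates and silently drops the rejected ones, so the fix replaces it with an explicit retry loop that releases each discarded batch of pages before generating the next one. Condensed from the diff below:

    List<Page> differentPages = List.of();
    do {
        // release the previously generated, rejected candidate before retrying
        differentPages.forEach(p -> Releasables.closeExpectNoException(p::releaseBlocks));
        differentPages = randomList(noPages, noPages, () -> randomPage(instance.columns()));
    } while (differentPages.equals(instance.pages()));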
--- .../xpack/esql/action/EsqlQueryResponseTests.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 7920e0575fd89..d71d0074c7ec0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.lucene.UnsupportedValueSource; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; @@ -136,11 +137,12 @@ protected EsqlQueryResponse mutateInstance(EsqlQueryResponse instance) { case 1 -> new EsqlQueryResponse(instance.columns(), deepCopyOfPages(instance), false == instance.columnar()); case 2 -> { int noPages = instance.pages().size(); - yield new EsqlQueryResponse( - instance.columns(), - randomValueOtherThan(instance.pages(), () -> randomList(noPages, noPages, () -> randomPage(instance.columns()))), - instance.columnar() - ); + List differentPages = List.of(); + do { + differentPages.forEach(p -> Releasables.closeExpectNoException(p::releaseBlocks)); + differentPages = randomList(noPages, noPages, () -> randomPage(instance.columns())); + } while (differentPages.equals(instance.pages())); + yield new EsqlQueryResponse(instance.columns(), differentPages, instance.columnar()); } default -> throw new IllegalArgumentException(); }; From dfaec0dbf0c8a0ba2912381334019725848f1f5a Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 9 Oct 2023 07:58:04 +0200 Subject: [PATCH 056/176] Enable Universal Profiling as Enterprise feature (#100333) With this commit we ensure that Universal Profiling can only be used with an Enterprise license. 
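The enforcement point is small; condensed from the new ProfilingLicenseChecker in the diff below. TransportGetStackTracesAction calls requireSupportedLicense() before doing any work, while the info/usage actions only report availability via the non-tracking variant of the same check:

    private static final LicensedFeature.Momentary UNIVERSAL_PROFILING_FEATURE =
        LicensedFeature.momentary(null, "universal_profiling", License.OperationMode.ENTERPRISE);

    public void requireSupportedLicense() {
        // passes on an Enterprise (or trial) license, otherwise throws a compliance exception
        if (UNIVERSAL_PROFILING_FEATURE.check(licenseStateResolver.get()) == false) {
            throw LicenseUtils.newComplianceException(UNIVERSAL_PROFILING_FEATURE.getName());
        }
    }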
--- docs/changelog/100333.yaml | 5 ++ docs/reference/rest-api/info.asciidoc | 4 ++ docs/reference/rest-api/usage.asciidoc | 4 ++ .../org/elasticsearch/TransportVersions.java | 2 + .../xpack/core/XPackClientPlugin.java | 4 +- .../elasticsearch/xpack/core/XPackField.java | 2 + .../core/action/XPackInfoFeatureAction.java | 4 +- .../core/action/XPackUsageFeatureAction.java | 4 +- .../core/application/ProfilingUsage.java | 31 +++++++++ .../LocalStateProfilingXPackPlugin.java | 25 ++++++++ .../xpack/profiling/ProfilingTestCase.java | 6 +- .../ProfilingInfoTransportAction.java | 49 ++++++++++++++ .../profiling/ProfilingLicenseChecker.java | 39 +++++++++++ .../xpack/profiling/ProfilingPlugin.java | 15 +++-- .../ProfilingUsageTransportAction.java | 64 +++++++++++++++++++ .../TransportGetStackTracesAction.java | 4 ++ .../ProfilingInfoTransportActionTests.java | 56 ++++++++++++++++ .../xpack/security/operator/Constants.java | 2 + 18 files changed, 310 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/100333.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/ProfilingUsage.java create mode 100644 x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/LocalStateProfilingXPackPlugin.java create mode 100644 x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportAction.java create mode 100644 x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingLicenseChecker.java create mode 100644 x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingUsageTransportAction.java create mode 100644 x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportActionTests.java diff --git a/docs/changelog/100333.yaml b/docs/changelog/100333.yaml new file mode 100644 index 0000000000000..96a2a62deffe5 --- /dev/null +++ b/docs/changelog/100333.yaml @@ -0,0 +1,5 @@ +pr: 100333 +summary: Enable Universal Profiling as Enterprise feature +area: Application +type: enhancement +issues: [] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index ec424ca20d324..28b6df215a18d 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -168,6 +168,10 @@ Example response: "enterprise_search": { "available": true, "enabled": true + }, + "universal_profiling": { + "available": true, + "enabled": true } }, "tagline" : "You know, for X" diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index 99e432eb07e1c..c33d203f1415b 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -473,6 +473,10 @@ GET /_xpack/usage "min_rule_count": 0, "max_rule_count": 0 } + }, + "universal_profiling" : { + "available" : true, + "enabled" : true } } ------------------------------------------------------------ diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index e851434ac2cb7..373dab307f378 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -135,6 +135,8 @@ static TransportVersion def(int id) { public static final TransportVersion NESTED_KNN_VECTOR_QUERY_V = def(8_511_00_0); public static final TransportVersion ML_PACKAGE_LOADER_PLATFORM_ADDED = def(8_512_00_0); public static final TransportVersion 
PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME = def(8_513_00_0); + public static final TransportVersion UNIVERSAL_PROFILING_LICENSE_ADDED = def(8_514_00_0); + /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 5610b18ac627c..6d019e50f9d5f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.core.aggregatemetric.AggregateMetricFeatureSetUsage; import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; import org.elasticsearch.xpack.core.application.EnterpriseSearchFeatureSetUsage; +import org.elasticsearch.xpack.core.application.ProfilingUsage; import org.elasticsearch.xpack.core.archive.ArchiveFeatureSetUsage; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.datastreams.DataStreamFeatureSetUsage; @@ -277,7 +278,8 @@ public List getNamedWriteables() { XPackFeatureSet.Usage.class, XPackField.ENTERPRISE_SEARCH, EnterpriseSearchFeatureSetUsage::new - ) + ), + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.UNIVERSAL_PROFILING, ProfilingUsage::new) ).filter(Objects::nonNull).toList(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index f78f755517d99..c8a78af429592 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -85,6 +85,8 @@ public final class XPackField { /** Name constant for the redact processor feature. */ public static final String REDACT_PROCESSOR = "redact_processor"; + /* Name for Universal Profiling. 
*/ + public static final String UNIVERSAL_PROFILING = "universal_profiling"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java index 1f943a5c68646..859950470f0e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackInfoFeatureAction.java @@ -50,6 +50,7 @@ public class XPackInfoFeatureAction extends ActionType public static final XPackInfoFeatureAction AGGREGATE_METRIC = new XPackInfoFeatureAction(XPackField.AGGREGATE_METRIC); public static final XPackInfoFeatureAction ARCHIVE = new XPackInfoFeatureAction(XPackField.ARCHIVE); public static final XPackInfoFeatureAction ENTERPRISE_SEARCH = new XPackInfoFeatureAction(XPackField.ENTERPRISE_SEARCH); + public static final XPackInfoFeatureAction UNIVERSAL_PROFILING = new XPackInfoFeatureAction(XPackField.UNIVERSAL_PROFILING); public static final List ALL; static { @@ -80,7 +81,8 @@ public class XPackInfoFeatureAction extends ActionType DATA_TIERS, AGGREGATE_METRIC, ARCHIVE, - ENTERPRISE_SEARCH + ENTERPRISE_SEARCH, + UNIVERSAL_PROFILING ) ); ALL = Collections.unmodifiableList(actions); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java index a63d535e2a06c..d96fd91ed3f22 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java @@ -50,6 +50,7 @@ public class XPackUsageFeatureAction extends ActionType ALL = List.of( AGGREGATE_METRIC, @@ -78,7 +79,8 @@ public class XPackUsageFeatureAction extends ActionType> nodePlugins() { return List.of( - LocalStateCompositeXPackPlugin.class, DataStreamsPlugin.class, - ProfilingPlugin.class, + LocalStateProfilingXPackPlugin.class, IndexLifecycle.class, UnsignedLongMapperPlugin.class, VersionFieldPlugin.class, @@ -58,6 +57,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { // .put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") // Disable ILM history index so that the tests don't have to clean it up .put(LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING.getKey(), false) + .put(LicenseSettings.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial") .build(); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportAction.java new file mode 100644 index 0000000000000..115b165f3e791 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; + +public class ProfilingInfoTransportAction extends XPackInfoFeatureTransportAction { + private final boolean enabled; + private final ProfilingLicenseChecker licenseChecker; + + @Inject + public ProfilingInfoTransportAction( + TransportService transportService, + ActionFilters actionFilters, + Settings settings, + ProfilingLicenseChecker licenseChecker + ) { + super(XPackInfoFeatureAction.UNIVERSAL_PROFILING.name(), transportService, actionFilters); + this.enabled = XPackSettings.PROFILING_ENABLED.get(settings); + this.licenseChecker = licenseChecker; + } + + @Override + public String name() { + return XPackField.UNIVERSAL_PROFILING; + } + + @Override + public boolean available() { + return licenseChecker.isSupportedLicense(); + } + + @Override + public boolean enabled() { + return enabled; + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingLicenseChecker.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingLicenseChecker.java new file mode 100644 index 0000000000000..1100c6b10c5f7 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingLicenseChecker.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.LicensedFeature; +import org.elasticsearch.license.XPackLicenseState; + +import java.util.function.Supplier; + +public final class ProfilingLicenseChecker { + private static final LicensedFeature.Momentary UNIVERSAL_PROFILING_FEATURE = LicensedFeature.momentary( + null, + "universal_profiling", + License.OperationMode.ENTERPRISE + ); + + private final Supplier licenseStateResolver; + + public ProfilingLicenseChecker(Supplier licenseStateResolver) { + this.licenseStateResolver = licenseStateResolver; + } + + public boolean isSupportedLicense() { + return UNIVERSAL_PROFILING_FEATURE.checkWithoutTracking(licenseStateResolver.get()); + } + + public void requireSupportedLicense() { + if (UNIVERSAL_PROFILING_FEATURE.check(licenseStateResolver.get()) == false) { + throw LicenseUtils.newComplianceException(UNIVERSAL_PROFILING_FEATURE.getName()); + } + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java index 9ef887ecf5639..d37a5be7543bd 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java @@ -39,7 +39,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import java.util.ArrayList; import java.util.Collection; @@ -108,10 +111,12 @@ public Collection createComponents( registry.get().initialize(); indexManager.get().initialize(); dataStreamManager.get().initialize(); - return List.of(registry.get(), indexManager.get(), dataStreamManager.get()); - } else { - return Collections.emptyList(); } + return Collections.singletonList(createLicenseChecker()); + } + + protected ProfilingLicenseChecker createLicenseChecker() { + return new ProfilingLicenseChecker(XPackPlugin::getSharedLicenseState); } public void updateCheckOutdatedIndices(boolean newValue) { @@ -179,7 +184,9 @@ public static ExecutorBuilder responseExecutorBuilder() { return List.of( new ActionHandler<>(GetStackTracesAction.INSTANCE, TransportGetStackTracesAction.class), new ActionHandler<>(GetFlamegraphAction.INSTANCE, TransportGetFlamegraphAction.class), - new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class) + new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class), + new ActionHandler<>(XPackUsageFeatureAction.UNIVERSAL_PROFILING, ProfilingUsageTransportAction.class), + new ActionHandler<>(XPackInfoFeatureAction.UNIVERSAL_PROFILING, ProfilingInfoTransportAction.class) ); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingUsageTransportAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingUsageTransportAction.java new file mode 100644 index 0000000000000..7e7b431759cd4 --- /dev/null +++ 
b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingUsageTransportAction.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; +import org.elasticsearch.xpack.core.application.ProfilingUsage; + +public class ProfilingUsageTransportAction extends XPackUsageFeatureTransportAction { + private final ProfilingLicenseChecker licenseChecker; + + private final boolean enabled; + + @Inject + public ProfilingUsageTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + ProfilingLicenseChecker licenseChecker, + Settings settings + ) { + super( + XPackUsageFeatureAction.UNIVERSAL_PROFILING.name(), + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ); + this.licenseChecker = licenseChecker; + this.enabled = XPackSettings.PROFILING_ENABLED.get(settings); + } + + @Override + protected void masterOperation( + Task task, + XPackUsageRequest request, + ClusterState state, + ActionListener listener + ) { + ProfilingUsage profilingUsage = new ProfilingUsage(licenseChecker.isSupportedLicense(), enabled); + listener.onResponse(new XPackUsageFeatureResponse(profilingUsage)); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 438fcd1461df0..113b600f7702b 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -88,6 +88,7 @@ public class TransportGetStackTracesAction extends HandledTransportAction submitListener) { + licenseChecker.requireSupportedLicense(); long start = System.nanoTime(); Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), submitTask); EventsIndex mediumDownsampled = EventsIndex.MEDIUM_DOWNSAMPLED; diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportActionTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportActionTests.java new file mode 100644 index 
0000000000000..b66b8a3db50f9 --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ProfilingInfoTransportActionTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.license.internal.XPackLicenseStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackSettings; + +import static org.hamcrest.core.Is.is; +import static org.mockito.Mockito.mock; + +public class ProfilingInfoTransportActionTests extends ESTestCase { + public void testAvailable() { + // trial mode - allow feature + XPackLicenseState licenseState = new XPackLicenseState(() -> 0); + + boolean enabled = randomBoolean(); + Settings settings = Settings.builder().put(XPackSettings.PROFILING_ENABLED.getKey(), enabled).build(); + + ProfilingInfoTransportAction featureSet = new ProfilingInfoTransportAction( + mock(TransportService.class), + mock(ActionFilters.class), + settings, + new ProfilingLicenseChecker(() -> licenseState) + ); + assertThat(featureSet.available(), is(true)); + assertThat(featureSet.enabled(), is(enabled)); + } + + public void testUnavailable() { + // won't work in BASIC + XPackLicenseState licenseState = new XPackLicenseState(() -> 0, new XPackLicenseStatus(License.OperationMode.BASIC, true, null)); + + boolean enabled = randomBoolean(); + Settings settings = Settings.builder().put(XPackSettings.PROFILING_ENABLED.getKey(), enabled).build(); + + ProfilingInfoTransportAction featureSet = new ProfilingInfoTransportAction( + mock(TransportService.class), + mock(ActionFilters.class), + settings, + new ProfilingLicenseChecker(() -> licenseState) + ); + assertThat(featureSet.available(), is(false)); + assertThat(featureSet.enabled(), is(enabled)); + } +} diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index a9acf669fa750..9f490792d800f 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -347,6 +347,7 @@ public class Constants { "cluster:monitor/xpack/info/spatial", "cluster:monitor/xpack/info/sql", "cluster:monitor/xpack/info/transform", + "cluster:monitor/xpack/info/universal_profiling", "cluster:monitor/xpack/info/voting_only", "cluster:monitor/xpack/info/watcher", "cluster:monitor/xpack/license/get", @@ -410,6 +411,7 @@ public class Constants { "cluster:monitor/xpack/usage/spatial", "cluster:monitor/xpack/usage/sql", "cluster:monitor/xpack/usage/transform", + "cluster:monitor/xpack/usage/universal_profiling", "cluster:monitor/xpack/usage/voting_only", "cluster:monitor/xpack/usage/watcher", "cluster:monitor/xpack/watcher/stats/dist", From 
a4db40d89c433df0a5cbea6faaeac40761242357 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 9 Oct 2023 19:55:31 +1100 Subject: [PATCH 057/176] Record operation purpose for s3 stats collection (#100236) A new no-op OperationPurpose parameter was added in #99615 to all blob store/container operation methods. This PR updates the s3 stats collection code to actually use this parameter for finer-grained stats collection and reporting. This differentiation between purposes is kept internally for now. The stats are currently aggregated over operations for existing stats reporting. This means responses from both GetRepositoriesMetering API and GetBlobStoreStats API will not be changed. We will have follow-ups to expose the finer stats separately. Relates: #99615 Relates: ES-6800 --- docs/changelog/100236.yaml | 5 + .../s3/S3BlobStoreRepositoryTests.java | 51 +++++- .../repositories/s3/S3BlobContainer.java | 35 ++-- .../repositories/s3/S3BlobStore.java | 149 +++++++++--------- .../s3/S3RetryingInputStream.java | 3 +- .../snapshots/mockstore/BlobStoreWrapper.java | 2 +- 6 files changed, 146 insertions(+), 99 deletions(-) create mode 100644 docs/changelog/100236.yaml diff --git a/docs/changelog/100236.yaml b/docs/changelog/100236.yaml new file mode 100644 index 0000000000000..b33825f9bc553 --- /dev/null +++ b/docs/changelog/100236.yaml @@ -0,0 +1,5 @@ +pr: 100236 +summary: Record operation purpose for s3 stats collection +area: Distributed +type: enhancement +issues: [] diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index d7294cab93844..37f1a9e6ff78e 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -60,18 +60,23 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import java.util.stream.StreamSupport; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") @@ -207,16 +212,28 @@ public void testAbortRequestStats() throws Exception { } public void testRequestStatsWithOperationPurposes() throws IOException { - // The operationPurpose parameter is added but not yet used. This test asserts the new parameter does not change - the existing stats collection.
final String repoName = createRepository(randomRepositoryName()); final RepositoriesService repositoriesService = internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); final BlobStore blobStore = repository.blobStore(); - + assertThat(blobStore, instanceOf(BlobStoreWrapper.class)); + final BlobStore delegateBlobStore = ((BlobStoreWrapper) blobStore).delegate(); + assertThat(delegateBlobStore, instanceOf(S3BlobStore.class)); + final S3BlobStore.StatsCollectors statsCollectors = ((S3BlobStore) delegateBlobStore).getStatsCollectors(); + + // Initial stats are collected with the default operation purpose + final Set allOperations = EnumSet.allOf(S3BlobStore.Operation.class) + .stream() + .map(S3BlobStore.Operation::getKey) + .collect(Collectors.toUnmodifiableSet()); + statsCollectors.collectors.keySet().forEach(statsKey -> assertThat(statsKey.purpose(), is(OperationPurpose.SNAPSHOT))); + final Map initialStats = blobStore.stats(); + assertThat(initialStats.keySet(), equalTo(allOperations)); + + // Collect more stats with an operation purpose other than the default + final OperationPurpose purpose = randomValueOtherThan(OperationPurpose.SNAPSHOT, () -> randomFrom(OperationPurpose.values())); final BlobPath blobPath = repository.basePath().add(randomAlphaOfLength(10)); final BlobContainer blobContainer = blobStore.blobContainer(blobPath); - final OperationPurpose purpose = randomFrom(OperationPurpose.values()); final BytesArray whatToWrite = new BytesArray(randomByteArrayOfLength(randomIntBetween(100, 1000))); blobContainer.writeBlob(purpose, "test.txt", whatToWrite, true); try (InputStream is = blobContainer.readBlob(purpose, "test.txt")) { @@ -224,11 +241,29 @@ public void testRequestStatsWithOperationPurposes() throws IOException { } blobContainer.delete(purpose); - final Map stats = blobStore.stats(); + // Internal stats collection is fine-grained and records different purposes assertThat( - stats.keySet(), - containsInAnyOrder("GetObject", "ListObjects", "PutObject", "PutMultipartObject", "DeleteObjects", "AbortMultipartObject") + statsCollectors.collectors.keySet().stream().map(S3BlobStore.StatsKey::purpose).collect(Collectors.toUnmodifiableSet()), + equalTo(Set.of(OperationPurpose.SNAPSHOT, purpose)) ); + // The stats report aggregates over different purposes + final Map newStats = blobStore.stats(); + assertThat(newStats.keySet(), equalTo(allOperations)); + assertThat(newStats, not(equalTo(initialStats))); + + final Set operationsSeenForTheNewPurpose = statsCollectors.collectors.keySet() + .stream() + .filter(sk -> sk.purpose() != OperationPurpose.SNAPSHOT) + .map(sk -> sk.operation().getKey()) + .collect(Collectors.toUnmodifiableSet()); + + newStats.forEach((k, v) -> { + if (operationsSeenForTheNewPurpose.contains(k)) { + assertThat(newStats.get(k), greaterThan(initialStats.get(k))); + } else { + assertThat(newStats.get(k), equalTo(initialStats.get(k))); + } + }); } public void testEnforcedCooldownPeriod() throws IOException { diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index c7dee4f1599c5..04bdf7b637e27 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -52,6 
+52,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.repositories.blobstore.ChunkedBlobOutputStream; +import org.elasticsearch.repositories.s3.S3BlobStore.Operation; import org.elasticsearch.threadpool.ThreadPool; import java.io.ByteArrayInputStream; @@ -204,7 +205,7 @@ protected void onCompletion() throws IOException { uploadId.get(), parts ); - complRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); + complRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose)); SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); } } @@ -237,7 +238,7 @@ private UploadPartRequest createPartUploadRequest( uploadRequest.setUploadId(uploadId); uploadRequest.setPartNumber(number); uploadRequest.setInputStream(stream); - uploadRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); + uploadRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose)); uploadRequest.setPartSize(size); uploadRequest.setLastPart(lastPart); return uploadRequest; @@ -245,7 +246,7 @@ private UploadPartRequest createPartUploadRequest( private void abortMultiPartUpload(OperationPurpose purpose, String uploadId, String blobName) { final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(blobStore.bucket(), blobName, uploadId); - abortRequest.setRequestMetricCollector(blobStore.abortPartUploadMetricCollector); + abortRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.ABORT_MULTIPART_OBJECT, purpose)); try (AmazonS3Reference clientReference = blobStore.clientReference()) { SocketAccess.doPrivilegedVoid(() -> clientReference.client().abortMultipartUpload(abortRequest)); } @@ -255,7 +256,7 @@ private InitiateMultipartUploadRequest initiateMultiPartUpload(OperationPurpose final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(blobStore.bucket(), blobName); initRequest.setStorageClass(blobStore.getStorageClass()); initRequest.setCannedACL(blobStore.getCannedACL()); - initRequest.setRequestMetricCollector(blobStore.multiPartUploadMetricCollector); + initRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose)); if (blobStore.serverSideEncryption()) { final ObjectMetadata md = new ObjectMetadata(); md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); @@ -285,13 +286,13 @@ public DeleteResult delete(OperationPurpose purpose) throws IOException { final ObjectListing list; if (prevListing != null) { final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing); - listNextBatchOfObjectsRequest.setRequestMetricCollector(blobStore.listMetricCollector); + listNextBatchOfObjectsRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose)); list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest)); } else { final ListObjectsRequest listObjectsRequest = new ListObjectsRequest(); listObjectsRequest.setBucketName(blobStore.bucket()); listObjectsRequest.setPrefix(keyPath); - listObjectsRequest.setRequestMetricCollector(blobStore.listMetricCollector); + listObjectsRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose)); list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); } 
final Iterator blobNameIterator = Iterators.map(list.getObjectSummaries().iterator(), summary -> { @@ -374,7 +375,7 @@ private List executeListing( ObjectListing list; if (prevListing != null) { final var listNextBatchOfObjectsRequest = new ListNextBatchOfObjectsRequest(prevListing); - listNextBatchOfObjectsRequest.setRequestMetricCollector(blobStore.listMetricCollector); + listNextBatchOfObjectsRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose)); list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(listNextBatchOfObjectsRequest)); } else { list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); @@ -393,7 +394,7 @@ private ListObjectsRequest listObjectsRequest(OperationPurpose purpose, String p return new ListObjectsRequest().withBucketName(blobStore.bucket()) .withPrefix(pathPrefix) .withDelimiter("/") - .withRequestMetricCollector(blobStore.listMetricCollector); + .withRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose)); } // exposed for tests @@ -428,7 +429,7 @@ void executeSingleUpload( final PutObjectRequest putRequest = new PutObjectRequest(s3BlobStore.bucket(), blobName, input, md); putRequest.setStorageClass(s3BlobStore.getStorageClass()); putRequest.setCannedAcl(s3BlobStore.getCannedACL()); - putRequest.setRequestMetricCollector(s3BlobStore.putMetricCollector); + putRequest.setRequestMetricCollector(s3BlobStore.getMetricCollector(Operation.PUT_OBJECT, purpose)); try (AmazonS3Reference clientReference = s3BlobStore.clientReference()) { SocketAccess.doPrivilegedVoid(() -> { clientReference.client().putObject(putRequest); }); @@ -506,7 +507,7 @@ void executeMultipartUpload( uploadId.get(), parts ); - complRequest.setRequestMetricCollector(s3BlobStore.multiPartUploadMetricCollector); + complRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.PUT_MULTIPART_OBJECT, purpose)); SocketAccess.doPrivilegedVoid(() -> clientReference.client().completeMultipartUpload(complRequest)); success = true; @@ -582,7 +583,7 @@ private class CompareAndExchangeOperation { private List listMultipartUploads() { final var listRequest = new ListMultipartUploadsRequest(bucket); listRequest.setPrefix(blobKey); - listRequest.setRequestMetricCollector(blobStore.listMetricCollector); + listRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.LIST_OBJECTS, purpose)); try { return SocketAccess.doPrivileged(() -> client.listMultipartUploads(listRequest)).getMultipartUploads(); } catch (AmazonS3Exception e) { @@ -670,7 +671,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener client.initiateMultipartUpload(initiateRequest)).getUploadId(); final var uploadPartRequest = new UploadPartRequest(); @@ -681,7 +682,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener client.uploadPart(uploadPartRequest)).getPartETag(); final var currentUploads = listMultipartUploads(); @@ -715,7 +716,9 @@ void run(BytesReference expected, BytesReference updated, ActionListener client.completeMultipartUpload(completeMultipartUploadRequest)); isComplete.set(true); } @@ -774,7 +777,7 @@ private void safeAbortMultipartUpload(String uploadId) { private void abortMultipartUploadIfExists(String uploadId) { try { final var request = new AbortMultipartUploadRequest(bucket, blobKey, uploadId); - request.setRequestMetricCollector(blobStore.abortPartUploadMetricCollector); + 
request.setRequestMetricCollector(blobStore.getMetricCollector(Operation.ABORT_MULTIPART_OBJECT, purpose)); SocketAccess.doPrivilegedVoid(() -> client.abortMultipartUpload(request)); } catch (AmazonS3Exception e) { if (e.getStatusCode() != 404) { @@ -815,7 +818,7 @@ public void compareAndExchangeRegister( public void getRegister(OperationPurpose purpose, String key, ActionListener listener) { ActionListener.completeWith(listener, () -> { final var getObjectRequest = new GetObjectRequest(blobStore.bucket(), buildKey(key)); - getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector); + getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.GET_OBJECT, purpose)); try ( var clientReference = blobStore.clientReference(); var s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 911d59aa52dcb..aab72e712136c 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -35,14 +35,17 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; +import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; +import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -75,14 +78,7 @@ class S3BlobStore implements BlobStore { private final ThreadPool threadPool; private final Executor snapshotExecutor; - private final Stats stats = new Stats(); - - final RequestMetricCollector getMetricCollector; - final RequestMetricCollector listMetricCollector; - final RequestMetricCollector putMetricCollector; - final RequestMetricCollector multiPartUploadMetricCollector; - final RequestMetricCollector deleteMetricCollector; - final RequestMetricCollector abortPartUploadMetricCollector; + private final StatsCollectors statsCollectors = new StatsCollectors(); S3BlobStore( S3Service service, @@ -105,48 +101,10 @@ class S3BlobStore implements BlobStore { this.repositoryMetadata = repositoryMetadata; this.threadPool = threadPool; this.snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - this.getMetricCollector = new IgnoreNoResponseMetricsCollector() { - @Override - public void collectMetrics(Request request) { - assert request.getHttpMethod().name().equals("GET"); - stats.getCount.addAndGet(getRequestCount(request)); - } - }; - this.listMetricCollector = new IgnoreNoResponseMetricsCollector() { - @Override - public void collectMetrics(Request request) { - assert request.getHttpMethod().name().equals("GET"); - stats.listCount.addAndGet(getRequestCount(request)); - } - }; - this.putMetricCollector = new IgnoreNoResponseMetricsCollector() { - @Override - public void collectMetrics(Request request) { - assert request.getHttpMethod().name().equals("PUT"); - stats.putCount.addAndGet(getRequestCount(request)); - } - }; - this.multiPartUploadMetricCollector = new IgnoreNoResponseMetricsCollector() { - @Override - public void 
collectMetrics(Request request) { - assert request.getHttpMethod().name().equals("PUT") || request.getHttpMethod().name().equals("POST"); - stats.postCount.addAndGet(getRequestCount(request)); - } - }; - this.deleteMetricCollector = new IgnoreNoResponseMetricsCollector() { - @Override - public void collectMetrics(Request request) { - assert request.getHttpMethod().name().equals("POST"); - stats.deleteCount.addAndGet(getRequestCount(request)); - } - }; - this.abortPartUploadMetricCollector = new IgnoreNoResponseMetricsCollector() { - @Override - public void collectMetrics(Request request) { - assert request.getHttpMethod().name().equals("DELETE"); - stats.abortCount.addAndGet(getRequestCount(request)); - } - }; + } + + RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) { + return statsCollectors.getMetricCollector(operation, purpose); } public Executor getSnapshotExecutor() { @@ -159,16 +117,43 @@ public TimeValue getCompareAndExchangeTimeToLive() { // metrics collector that ignores null responses that we interpret as the request not reaching the S3 endpoint due to a network // issue - private abstract static class IgnoreNoResponseMetricsCollector extends RequestMetricCollector { + private static class IgnoreNoResponseMetricsCollector extends RequestMetricCollector { + + private final LongAdder counter = new LongAdder(); + private final Operation operation; + + private IgnoreNoResponseMetricsCollector(Operation operation) { + this.operation = operation; + } @Override public final void collectMetrics(Request request, Response response) { if (response != null) { - collectMetrics(request); + assert assertConsistencyBetweenHttpRequestAndOperation(request, operation); + counter.add(getRequestCount(request)); } } - protected abstract void collectMetrics(Request request); + private boolean assertConsistencyBetweenHttpRequestAndOperation(Request request, Operation operation) { + switch (operation) { + case GET_OBJECT, LIST_OBJECTS -> { + return request.getHttpMethod().name().equals("GET"); + } + case PUT_OBJECT -> { + return request.getHttpMethod().name().equals("PUT"); + } + case PUT_MULTIPART_OBJECT -> { + return request.getHttpMethod().name().equals("PUT") || request.getHttpMethod().name().equals("POST"); + } + case DELETE_OBJECTS -> { + return request.getHttpMethod().name().equals("POST"); + } + case ABORT_MULTIPART_OBJECT -> { + return request.getHttpMethod().name().equals("DELETE"); + } + default -> throw new AssertionError("unknown operation [" + operation + "]"); + } + } } private static long getRequestCount(Request request) { @@ -273,7 +258,7 @@ private void deletePartition( private static DeleteObjectsRequest bulkDelete(OperationPurpose purpose, S3BlobStore blobStore, List blobs) { return new DeleteObjectsRequest(blobStore.bucket()).withKeys(blobs.toArray(Strings.EMPTY_ARRAY)) .withQuiet(true) - .withRequestMetricCollector(blobStore.deleteMetricCollector); + .withRequestMetricCollector(blobStore.getMetricCollector(Operation.DELETE_OBJECTS, purpose)); } @Override @@ -283,7 +268,12 @@ public void close() throws IOException { @Override public Map stats() { - return stats.toMap(); + return statsCollectors.statsMap(); + } + + // Package private for testing + StatsCollectors getStatsCollectors() { + return statsCollectors; } public CannedAccessControlList getCannedACL() { @@ -332,29 +322,42 @@ ThreadPool getThreadPool() { return threadPool; } - static class Stats { + enum Operation { + GET_OBJECT("GetObject"), + LIST_OBJECTS("ListObjects"), + 
PUT_OBJECT("PutObject"), + PUT_MULTIPART_OBJECT("PutMultipartObject"), + DELETE_OBJECTS("DeleteObjects"), + ABORT_MULTIPART_OBJECT("AbortMultipartObject"); - final AtomicLong listCount = new AtomicLong(); + private final String key; - final AtomicLong getCount = new AtomicLong(); + String getKey() { + return key; + } + + Operation(String key) { + this.key = key; + } + } - final AtomicLong putCount = new AtomicLong(); + record StatsKey(Operation operation, OperationPurpose purpose) {} - final AtomicLong postCount = new AtomicLong(); + static class StatsCollectors { + final Map collectors = new ConcurrentHashMap<>(); - final AtomicLong deleteCount = new AtomicLong(); + RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) { + return collectors.computeIfAbsent(new StatsKey(operation, purpose), k -> buildMetricCollector(k.operation())); + } - final AtomicLong abortCount = new AtomicLong(); + Map statsMap() { + final Map m = Arrays.stream(Operation.values()).collect(Collectors.toMap(Operation::getKey, e -> 0L)); + collectors.forEach((sk, v) -> m.compute(sk.operation().getKey(), (k, c) -> Objects.requireNonNull(c) + v.counter.sum())); + return Map.copyOf(m); + } - Map toMap() { - final Map results = new HashMap<>(); - results.put("GetObject", getCount.get()); - results.put("ListObjects", listCount.get()); - results.put("PutObject", putCount.get()); - results.put("PutMultipartObject", postCount.get()); - results.put("DeleteObjects", deleteCount.get()); - results.put("AbortMultipartObject", abortCount.get()); - return results; + IgnoreNoResponseMetricsCollector buildMetricCollector(Operation operation) { + return new IgnoreNoResponseMetricsCollector(operation); } } } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index d796eb49e7bcb..6cad60f32de47 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -20,6 +20,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.repositories.s3.S3BlobStore.Operation; import java.io.IOException; import java.io.InputStream; @@ -82,7 +83,7 @@ class S3RetryingInputStream extends InputStream { private void openStream() throws IOException { try (AmazonS3Reference clientReference = blobStore.clientReference()) { final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey); - getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector); + getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.GET_OBJECT, purpose)); if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) { assert start + currentOffset <= end : "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end; diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java index f6811de12bb60..ee7ef2547acb0 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java @@ -44,7 +44,7 @@ 
public Map stats() { return delegate.stats(); } - protected BlobStore delegate() { + public BlobStore delegate() { return delegate; } From 48e86ad9b7e6f673138f0f7b613eb39f1b9e1fd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Mon, 9 Oct 2023 10:56:59 +0200 Subject: [PATCH 058/176] [Transform] Shutdown the task immediately when `force` == `true` (#100203) --- docs/changelog/100203.yaml | 5 +++++ .../action/TransportStopTransformAction.java | 14 ++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 docs/changelog/100203.yaml diff --git a/docs/changelog/100203.yaml b/docs/changelog/100203.yaml new file mode 100644 index 0000000000000..23a39cb5020e0 --- /dev/null +++ b/docs/changelog/100203.yaml @@ -0,0 +1,5 @@ +pr: 100203 +summary: Shutdown the task immediately when `force` == `true` +area: Transform +type: bug +issues: [] diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java index bf24c7d7c1d03..54d33f0df3638 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java @@ -242,6 +242,20 @@ protected void taskOperation( } if (ids.contains(transformTask.getTransformId())) { + if (request.isForce()) { + // If force==true, we skip the additional step (setShouldStopAtCheckpoint) and move directly to shutting down the task. + // This way we ensure that the persistent task is removed ASAP (as opposed to being removed in one of the listeners). + try { + // Here the task is deregistered in scheduler and marked as completed in persistent task service. + transformTask.shutdown(); + // Here the indexer is aborted so that its thread finishes work ASAP. + transformTask.onCancelled(); + listener.onResponse(new Response(true)); + } catch (ElasticsearchException ex) { + listener.onFailure(ex); + } + return; + } // move the call to the generic thread pool, so we do not block the network thread threadPool.generic().execute(() -> { transformTask.setShouldStopAtCheckpoint(request.isWaitForCheckpoint(), ActionListener.wrap(r -> { From 15d541f82e4b64c68a380fd9e5041b7804452559 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 9 Oct 2023 10:13:33 +0100 Subject: [PATCH 059/176] Mute failing ClusterSearchShardsResponseTests/testSerialization (#100483) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Test `ClusterSearchShardsResponseTests/testSerialization` fails with output ``` org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponseTests > testSerialization FAILED java.lang.IllegalArgumentException: Node versions can only be inferred before release version 8.10.0 at __randomizedtesting.SeedInfo.seed([8D8D9B5E7417133A:D955FA1653452EA6]:0) at org.elasticsearch.cluster.node.VersionInformation.inferVersions(VersionInformation.java:42) at org.elasticsearch.cluster.node.DiscoveryNodeUtils$Builder.build(DiscoveryNodeUtils.java:152) at org.elasticsearch.cluster.node.DiscoveryNodeUtils.create(DiscoveryNodeUtils.java:39) at org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponseTests.testSerialization(ClusterSearchShardsResponseTests.java:52) ``` Mute it.
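For context, "mute" here means the standard `AwaitsFix` pattern used across the test suite: the annotation keeps the test compiled but makes the runner skip it until the linked issue is resolved. A minimal sketch of what the change below amounts to (test body elided):

```java
import org.apache.lucene.tests.util.LuceneTestCase;

import org.elasticsearch.test.ESTestCase;

public class ClusterSearchShardsResponseTests extends ESTestCase {

    // Skipped by the test runner until the tracking issue below is resolved.
    @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100482")
    public void testSerialization() throws Exception {
        // ... unchanged test body ...
    }
}
```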
Relates #100482 --- .../admin/cluster/shards/ClusterSearchShardsResponseTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java index 1922aa4042ebd..59679cc150910 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.cluster.shards; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -38,6 +39,7 @@ public class ClusterSearchShardsResponseTests extends ESTestCase { + @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100482") public void testSerialization() throws Exception { Map indicesAndFilters = new HashMap<>(); Set nodes = new HashSet<>(); From 05142d2e4cf56e9bcf9368a29e6f11f0bcbd1582 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 9 Oct 2023 10:30:12 +0100 Subject: [PATCH 060/176] Mute failing InternalEngineTests/testGetWithSearcherWrapper (#100485) Test `InternalEngineTests/testGetWithSearcherWrapper` fails with: ``` org.elasticsearch.index.engine.InternalEngineTests > testGetWithSearcherWrapper FAILED java.lang.AssertionError: expected:<0> but was:<1> at __randomizedtesting.SeedInfo.seed([B8C232CD98C68607:A62E178D70FDEDC1]:0) at org.junit.Assert.fail(Assert.java:88) at org.junit.Assert.failNotEquals(Assert.java:834) at org.junit.Assert.assertEquals(Assert.java:645) at org.junit.Assert.assertEquals(Assert.java:631) at org.elasticsearch.index.engine.InternalEngineTests.testGetWithSearcherWrapper(InternalEngineTests.java:1096) ``` This PR mutes it. relates https://github.com/elastic/elasticsearch/issues/99916 --- .../java/org/elasticsearch/index/engine/InternalEngineTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index aed83cf8abd95..5cec0c889d414 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1039,6 +1039,7 @@ public void testSimpleOperations() throws Exception { searchResult.close(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99916") public void testGetWithSearcherWrapper() throws Exception { engine.refresh("warm_up"); engine.index(indexForDoc(createParsedDoc("1", null))); From ff08e0208d5248418c1bf80eb259d95429c19bc1 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 9 Oct 2023 10:38:59 +0100 Subject: [PATCH 061/176] Reinstate RepositoryData BwC (#100447) This commit moves back to using a `"major.minor.patch"` string for the version field in snapshots stored in `RepositoryData`, using the marker string `"8.11.0"` to allow older versions to filter out newer snapshots and adding a new `index_version` field alongside. This format is fully backwardly-compatible, except that it trips an assertion in the versions of 8.10.x released today. When running without assertions enabled, things work correctly in all versions. 
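To make the format concrete, a snapshot entry in the `RepositoryData` blob written by 8.11.0 and later looks roughly like this (field names follow the constants in the diff; the values are illustrative, not taken from a real repository):

```
{
  "name": "snap-1",
  "uuid": "tw3AoJE2RbaUfCNFRkGAEg",
  "version": "8.11.0",
  "index_version": 8500061
}
```

Entries written by older versions keep the real `"major.minor.patch"` string in `version` and carry no `index_version` field. Pre-8.11.0 readers parse `version` with their existing string logic, so every newer snapshot looks like an 8.11.0 one and gets filtered out as too new, while 8.11.0+ readers prefer `index_version` whenever it is present.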
Relates #98454 --- docs/changelog/100447.yaml | 5 ++ qa/repository-multi-version/build.gradle | 5 ++ .../MultiVersionRepositoryAccessIT.java | 18 ----- .../repositories/RepositoryData.java | 75 ++++++++++++++++--- .../blobstore/BlobStoreRepository.java | 28 +++++-- .../repositories/RepositoryDataTests.java | 5 +- .../AbstractSnapshotIntegTestCase.java | 25 +++---- 7 files changed, 111 insertions(+), 50 deletions(-) create mode 100644 docs/changelog/100447.yaml diff --git a/docs/changelog/100447.yaml b/docs/changelog/100447.yaml new file mode 100644 index 0000000000000..c20eb1599cf41 --- /dev/null +++ b/docs/changelog/100447.yaml @@ -0,0 +1,5 @@ +pr: 100447 +summary: Reinstate `RepositoryData` BwC +area: Snapshot/Restore +type: bug +issues: [] diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index 0b9bad2b705ce..80d316536e09e 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -29,6 +29,11 @@ BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> numberOfNodes = 2 setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' + if (v.equals('8.10.0') || v.equals('8.10.1') || v.equals('8.10.2')) { + // 8.10.x versions contain a bogus assertion that trips when reading repositories touched by newer versions + // see https://github.com/elastic/elasticsearch/issues/98454 for details + jvmArgs '-da' + } } } diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index db99087b07140..a7dad1ebeec50 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.upgrades; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -78,8 +77,6 @@ public static TestStep parse(String value) { private static final TestStep TEST_STEP = TestStep.parse(System.getProperty("tests.rest.suite")); - private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); - @Override protected boolean preserveSnapshotsUponCompletion() { return true; @@ -96,11 +93,6 @@ protected boolean preserveTemplatesUponCompletion() { } public void testCreateAndRestoreSnapshot() throws IOException { - assumeTrue( - "test does not work for downgrades before 8.10.0, see https://github.com/elastic/elasticsearch/issues/98454", - OLD_CLUSTER_VERSION.onOrAfter(Version.V_8_10_0) - ); - final String repoName = getTestName(); try { final int shards = 3; @@ -147,11 +139,6 @@ public void testCreateAndRestoreSnapshot() throws IOException { } public void testReadOnlyRepo() throws IOException { - assumeTrue( - "test does not fully work for downgrades before 8.10.0, see https://github.com/elastic/elasticsearch/issues/98454", - OLD_CLUSTER_VERSION.onOrAfter(Version.V_8_10_0) || TEST_STEP != TestStep.STEP3_OLD_CLUSTER - ); - final String repoName = getTestName(); final int shards = 3; final boolean readOnly = TEST_STEP.ordinal() > 1; // only restore from read-only repo in 
steps 3 and 4 @@ -185,11 +172,6 @@ public void testReadOnlyRepo() throws IOException { ); public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { - assumeTrue( - "test does not work for downgrades before 8.10.0, see https://github.com/elastic/elasticsearch/issues/98454", - OLD_CLUSTER_VERSION.onOrAfter(Version.V_8_10_0) - ); - final String repoName = getTestName(); try { final int shards = 3; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 4f8cc64655c70..858f6f0706fd7 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -13,6 +13,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; @@ -286,6 +287,7 @@ public boolean hasMissingDetails(SnapshotId snapshotId) { final SnapshotDetails snapshotDetails = getSnapshotDetails(snapshotId); return snapshotDetails == null || snapshotDetails.getVersion() == null + || snapshotDetails.getVersion().id() == NUMERIC_INDEX_VERSION_MARKER.id() || snapshotDetails.getStartTimeMillis() == -1 || snapshotDetails.getEndTimeMillis() == -1 || snapshotDetails.getSlmPolicy() == null; @@ -641,6 +643,7 @@ public Map resolveNewIndices(List indicesToResolve, Map private static final String CLUSTER_UUID = "cluster_id"; private static final String STATE = "state"; private static final String VERSION = "version"; + private static final String INDEX_VERSION = "index_version"; private static final String MIN_VERSION = "min_version"; private static final String START_TIME_MILLIS = "start_time_millis"; private static final String END_TIME_MILLIS = "end_time_millis"; @@ -653,6 +656,13 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final return snapshotsToXContent(builder, repoMetaVersion, false); } + /** + * From 8.11.0 onwards we use numeric index versions, but leave the string "8.11.0" in the old version field for bwc. + * See #98454 for details. + */ + private static final IndexVersion NUMERIC_INDEX_VERSION_MARKER = IndexVersion.fromId(8_11_00_99); + private static final String NUMERIC_INDEX_VERSION_MARKER_STRING = "8.11.0"; + /** * Writes the snapshots metadata and the related indices metadata to x-content. * @param permitMissingUuid indicates whether we permit the repository- and cluster UUIDs to be missing, @@ -680,10 +690,20 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final } else { minVersion = SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION; } + // Note that all known versions expect the MIN_VERSION field to be a string, and versions before 8.11.0 try and parse it as a + // major.minor.patch version number, so if we introduce a numeric format version in future then this will cause them to fail + // with an opaque parse error rather than the more helpful: + // + // IllegalStateException: this snapshot repository format requires Elasticsearch version [x.y.z] or later + // + // Likewise if we simply encode the numeric IndexVersion as a string then versions from 8.11.0 onwards will report the exact + // string in this message, which is not especially helpful to users. 
Slightly more helpful than the opaque parse error reported + // by earlier versions, but still not great. TODO rethink this if and when adding a new snapshot repository format version. if (minVersion.before(IndexVersion.V_8_10_0)) { // write as a string builder.field(MIN_VERSION, Version.fromId(minVersion.id()).toString()); } else { + assert false : "writing a numeric version [" + minVersion + "] is unhelpful here, see preceding comment"; // write an int builder.field(MIN_VERSION, minVersion.id()); } @@ -721,6 +741,9 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final // write the snapshots list + int numericIndexVersionMarkerPlaceholdersUsed = 0; + SnapshotId lastSnapshotWithNumericIndexVersionPlaceholder = null; + builder.startArray(SNAPSHOTS); for (final SnapshotId snapshot : getSnapshotIds()) { builder.startObject(); @@ -742,10 +765,16 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final } final IndexVersion version = snapshotDetails.getVersion(); if (version != null) { - if (version.before(IndexVersion.V_8_10_0)) { - builder.field(VERSION, Version.fromId(version.id()).toString()); + if (version.equals(NUMERIC_INDEX_VERSION_MARKER)) { + numericIndexVersionMarkerPlaceholdersUsed += 1; + lastSnapshotWithNumericIndexVersionPlaceholder = snapshot; + builder.field(VERSION, NUMERIC_INDEX_VERSION_MARKER_STRING); + } else if (version.onOrAfter(IndexVersion.V_8_500_000)) { + builder.field(VERSION, NUMERIC_INDEX_VERSION_MARKER_STRING); + builder.field(INDEX_VERSION, version.id()); } else { - builder.field(VERSION, version.id()); + assert version.id() < NUMERIC_INDEX_VERSION_MARKER.id() : version; // versions between 8.10.last and 8_500_000 invalid + builder.field(VERSION, Version.fromId(version.id()).toString()); } } @@ -763,6 +792,18 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final } builder.endArray(); + if (numericIndexVersionMarkerPlaceholdersUsed > 0) { + // This shouldn't happen without other failures - we might see the 8.11.0 marker if the RepositoryData was previously written by + // a pre-8.11.0 version which does not know to write the INDEX_VERSION field, but in that case we will reload the correct + // version from SnapshotInfo before writing the new RepositoryData; this reload process is technically a best-effort thing so we + // must tolerate the case where it fails, but we can report the problem at least. 
+ logger.warn( + "created RepositoryData with [{}] snapshot(s) using a placeholder version of '8.11.0', including [{}]", + numericIndexVersionMarkerPlaceholdersUsed, + lastSnapshotWithNumericIndexVersionPlaceholder + ); + } + // write the indices map builder.startObject(INDICES); for (final IndexId indexId : getIndices().values()) { @@ -824,14 +865,22 @@ public static RepositoryData snapshotsFromXContent(XContentParser parser, long g indexMetaIdentifiers = parser.mapStrings(); } case MIN_VERSION -> { - IndexVersion version = parseIndexVersion(parser.nextToken(), parser); + final var token = parser.nextToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, token, parser); + final var versionString = parser.text(); + final var version = switch (versionString) { + case "7.12.0" -> IndexVersion.V_7_12_0; + case "7.9.0" -> IndexVersion.V_7_9_0; + case "7.6.0" -> IndexVersion.V_7_6_0; + default -> + // All (known) versions only ever emit one of the above strings for the format version, so if we see something + // else it must be a newer version or else something wholly invalid. Report the raw string rather than trying + // to parse it. + throw new IllegalStateException(Strings.format(""" + this snapshot repository format requires Elasticsearch version [%s] or later""", versionString)); + }; assert SnapshotsService.useShardGenerations(version); - if (version.after(IndexVersion.current())) { - throw new IllegalStateException( - "this snapshot repository format requires Elasticsearch version [" + version + "] or later" - ); - } } case UUID -> { XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.nextToken(), parser); @@ -917,6 +966,7 @@ private static void parseSnapshots( SnapshotState state = null; Map metaGenerations = null; IndexVersion version = null; + IndexVersion indexVersion = null; long startTimeMillis = -1; long endTimeMillis = -1; String slmPolicy = null; @@ -932,6 +982,7 @@ private static void parseSnapshots( p -> stringDeduplicator.computeIfAbsent(p.text(), Function.identity()) ); case VERSION -> version = parseIndexVersion(token, parser); + case INDEX_VERSION -> indexVersion = IndexVersion.fromId(parser.intValue()); case START_TIME_MILLIS -> { assert startTimeMillis == -1; startTimeMillis = parser.longValue(); @@ -945,6 +996,9 @@ private static void parseSnapshots( } assert (startTimeMillis == -1) == (endTimeMillis == -1) : "unexpected: " + startTimeMillis + ", " + endTimeMillis + ", "; final SnapshotId snapshotId = new SnapshotId(name, uuid); + if (indexVersion != null) { + version = indexVersion; + } if (state != null || version != null) { snapshotsDetails.put(uuid, new SnapshotDetails(state, version, startTimeMillis, endTimeMillis, slmPolicy)); } @@ -963,6 +1017,9 @@ private static IndexVersion parseIndexVersion(XContentParser.Token token, XConte } else { XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, token, parser); final var versionStr = parser.text(); + if (NUMERIC_INDEX_VERSION_MARKER_STRING.equals(versionStr)) { + return NUMERIC_INDEX_VERSION_MARKER; + } final var versionId = Version.fromString(versionStr).id; if (versionId > 8_11_00_99 && versionId < 8_500_000) { logger.error("found impossible string index version [{}] with id [{}]", versionStr, versionId); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 33e682354c9cc..0825337143f8e 100644 --- 
a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -932,15 +932,27 @@ private void doDeleteShardSnapshots( })) { // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, newRepoData, refs.acquireListener()); - writeUpdatedShardMetaDataAndComputeDeletes( - snapshotIds, - repositoryData, - false, - refs.acquireListener() - .delegateFailure( - (l, deleteResults) -> asyncCleanupUnlinkedShardLevelBlobs(repositoryData, snapshotIds, deleteResults, l) + + // writeIndexGen finishes on master-service thread so must fork here. + threadPool.executor(ThreadPool.Names.SNAPSHOT) + .execute( + ActionRunnable.wrap( + refs.acquireListener(), + l0 -> writeUpdatedShardMetaDataAndComputeDeletes( + snapshotIds, + repositoryData, + false, + l0.delegateFailure( + (l, deleteResults) -> asyncCleanupUnlinkedShardLevelBlobs( + repositoryData, + snapshotIds, + deleteResults, + l + ) + ) + ) ) - ); + ); } }, listener::onFailure)); } diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 6c1459640386f..45e6750ceb0d3 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; @@ -408,12 +409,12 @@ public void testIndexMetaDataToRemoveAfterRemovingSnapshotWithSharing() { } public void testFailsIfMinVersionNotSatisfied() throws IOException { - final IndexVersion futureVersion = IndexVersion.fromId(IndexVersion.current().id() + 1_000_000); + final String futureVersion = Version.fromId(IndexVersion.current().id() + 1_000_000).toString(); final XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.JSON).xContent()); builder.startObject(); { - builder.field("min_version", futureVersion.id()); + builder.field("min_version", futureVersion); builder.field("junk", "should not get this far"); } builder.endObject(); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index a675cad6ed670..0eeb717ec22be 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -365,15 +365,6 @@ protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) t } } - private static String versionString(IndexVersion version) { - if (version.before(IndexVersion.V_8_9_0)) { - // add back the "" for a json String - return "\"" + Version.fromId(version.id()) + "\""; - } else { - return version.toString(); - } - } - /** * Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard * generations (the existence of which would short-circuit checks for the repo containing old version snapshots) @@ -392,11 +383,19 @@ protected String 
initWithSnapshotVersion(String repoName, Path repoPath, IndexVe final RepositoryData repositoryData = getRepositoryData(repoName, version); final XContentBuilder jsonBuilder = JsonXContent.contentBuilder(); repositoryData.snapshotsToXContent(jsonBuilder, version); + final var currentVersionString = Strings.toString(jsonBuilder); + final String oldVersionString; + if (version.onOrAfter(IndexVersion.V_8_500_000)) { + oldVersionString = currentVersionString.replace( + ",\"index_version\":" + IndexVersion.current(), + ",\"index_version\":" + version + ); + } else { + oldVersionString = currentVersionString.replace(",\"index_version\":" + IndexVersion.current(), "") + .replace(",\"version\":\"8.11.0\"", ",\"version\":\"" + Version.fromId(version.id()) + "\""); + } final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent( - JsonXContent.jsonXContent.createParser( - XContentParserConfiguration.EMPTY, - Strings.toString(jsonBuilder).replace(IndexVersion.current().toString(), versionString(version)) - ), + JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, oldVersionString), repositoryData.getGenId(), randomBoolean() ); From 0142a0d25436b0bcc3b158411cfc99847e51790a Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 9 Oct 2023 10:41:57 +0100 Subject: [PATCH 062/176] Mute failing ShardGetServiceTests/testGetFromTranslogWithLongSourceMappingOptionsAndStoredFields (#100488) Test `ShardGetServiceTests/testGetFromTranslogWithLongSourceMappingOptionsAndStoredFields` fails with: ``` org.elasticsearch.index.shard.ShardGetServiceTests > testGetFromTranslogWithLongSourceMappingOptionsAndStoredFields FAILED java.lang.AssertionError: expected:<0> but was:<1> at __randomizedtesting.SeedInfo.seed([61581A6063309088:E3CD1213503CCA95]:0) at org.junit.Assert.fail(Assert.java:88) at org.junit.Assert.failNotEquals(Assert.java:834) at org.junit.Assert.assertEquals(Assert.java:645) at org.junit.Assert.assertEquals(Assert.java:631) at org.elasticsearch.index.shard.ShardGetServiceTests.runGetFromTranslogWithOptions(ShardGetServiceTests.java:165) at org.elasticsearch.index.shard.ShardGetServiceTests.testGetFromTranslogWithLongSourceMappingOptionsAndStoredFields(ShardGetServiceTests.java:104) ``` Mute it. Relates #100487 --- .../java/org/elasticsearch/index/shard/ShardGetServiceTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index f099fa657b89c..48f86b2ad82a4 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -95,6 +95,7 @@ public void testGetFromTranslogWithStringSourceMappingOptionsAndStoredFields() t runGetFromTranslogWithOptions(docToIndex, sourceOptions, noSource ? "" : "{\"bar\":\"bar\"}", "\"text\"", "foo", false); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100487") public void testGetFromTranslogWithLongSourceMappingOptionsAndStoredFields() throws IOException { String docToIndex = """ {"foo" : 7, "bar" : 42} From 525fe59ee20bc91ec1ef2a05ba33c18c1d27024a Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 9 Oct 2023 21:01:20 +1100 Subject: [PATCH 063/176] Make APM meter available in s3 blobstore (#100464) This PR wires the new Meter interface into S3BlobStore. The new meter field remains unused in this PR. 
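Condensed, the plumbing added in this change looks like this (all names are taken from the diff; the surrounding boilerplate is dropped):

```java
// S3RepositoryPlugin#createComponents captures the meter from the telemetry provider once:
meter.set(telemetryProvider.getMeter());

// S3RepositoryPlugin#createRepository then threads it into the repository:
return new S3Repository(metadata, registry, service.get(), clusterService, bigArrays, recoverySettings, meter.get());

// S3Repository passes it up to the MeteredBlobStoreRepository base class and, in
// createBlobStore(), down into S3BlobStore, where it sits in a private final field.
// Tests and the other metered repositories (Azure, GCS) pass Meter.NOOP for now.
```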
Actual metric collection will be addressed in follow-ups. Relates: ES-6801 --- .../repositories/azure/AzureRepository.java | 4 +++- .../gcs/GoogleCloudStorageRepository.java | 4 +++- .../s3/S3BlobStoreRepositoryTests.java | 3 ++- .../s3/S3RepositoryThirdPartyTests.java | 4 +++- .../repositories/s3/S3BlobStore.java | 6 +++++- .../repositories/s3/S3Repository.java | 20 ++++++++++++++++--- .../repositories/s3/S3RepositoryPlugin.java | 5 ++++- .../s3/RepositoryCredentialsTests.java | 3 ++- .../s3/S3BlobContainerRetriesTests.java | 4 +++- .../repositories/s3/S3RepositoryTests.java | 4 +++- .../blobstore/MeteredBlobStoreRepository.java | 6 +++++- .../RepositoriesServiceTests.java | 7 +++++-- 12 files changed, 55 insertions(+), 15 deletions(-) diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 388474acc75ea..36742765edad5 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.util.Locale; @@ -107,7 +108,8 @@ public AzureRepository( bigArrays, recoverySettings, buildBasePath(metadata), - buildLocation(metadata) + buildLocation(metadata), + Meter.NOOP ); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index e2338371cf837..a42839c6d0174 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -21,6 +21,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.util.Map; @@ -76,7 +77,8 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { bigArrays, recoverySettings, buildBasePath(metadata), - buildLocation(metadata) + buildLocation(metadata), + Meter.NOOP ); this.storageService = storageService; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 37f1a9e6ff78e..72a6fa1026555 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.snapshots.SnapshotState; import 
org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.snapshots.mockstore.BlobStoreWrapper; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -355,7 +356,7 @@ protected S3Repository createRepository( BigArrays bigArrays, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings) { + return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, Meter.NOOP) { @Override public BlobStore blobStore() { diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 87613b0e8f6a1..0e2e38a5af224 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -111,7 +112,8 @@ public long absoluteTimeInMillis() { node().injector().getInstance(PluginsService.class).filterPlugins(S3RepositoryPlugin.class).get(0).getService(), ClusterServiceUtils.createClusterService(threadpool), BigArrays.NON_RECYCLING_INSTANCE, - new RecoverySettings(node().settings(), node().injector().getInstance(ClusterService.class).getClusterSettings()) + new RecoverySettings(node().settings(), node().injector().getInstance(ClusterService.class).getClusterSettings()), + Meter.NOOP ) ) { repository.start(); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index aab72e712136c..f371d6f354763 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -77,6 +78,7 @@ class S3BlobStore implements BlobStore { private final ThreadPool threadPool; private final Executor snapshotExecutor; + private final Meter meter; private final StatsCollectors statsCollectors = new StatsCollectors(); @@ -89,7 +91,8 @@ class S3BlobStore implements BlobStore { String storageClass, RepositoryMetadata repositoryMetadata, BigArrays bigArrays, - ThreadPool threadPool + ThreadPool threadPool, + Meter meter ) { this.service = service; this.bigArrays = bigArrays; @@ -101,6 +104,7 @@ class S3BlobStore implements BlobStore { this.repositoryMetadata = repositoryMetadata; this.threadPool = threadPool; this.snapshotExecutor = 
threadPool.executor(ThreadPool.Names.SNAPSHOT); + this.meter = meter; } RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) { diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 7dddf07d0f23e..5a4b5b3a313d8 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -37,6 +37,7 @@ import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotsService; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -205,7 +206,8 @@ class S3Repository extends MeteredBlobStoreRepository { final S3Service service, final ClusterService clusterService, final BigArrays bigArrays, - final RecoverySettings recoverySettings + final RecoverySettings recoverySettings, + final Meter meter ) { super( metadata, @@ -214,7 +216,8 @@ class S3Repository extends MeteredBlobStoreRepository { bigArrays, recoverySettings, buildBasePath(metadata), - buildLocation(metadata) + buildLocation(metadata), + meter ); this.service = service; this.snapshotExecutor = threadPool().executor(ThreadPool.Names.SNAPSHOT); @@ -408,7 +411,18 @@ private static BlobPath buildBasePath(RepositoryMetadata metadata) { @Override protected S3BlobStore createBlobStore() { - return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, metadata, bigArrays, threadPool); + return new S3BlobStore( + service, + bucket, + serverSideEncryption, + bufferSize, + cannedACL, + storageClass, + metadata, + bigArrays, + threadPool, + meter + ); } // only use for testing diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 62fe279726164..ca1dcfae879b1 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -32,6 +32,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.script.ScriptService; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -68,6 +69,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo } private final SetOnce service = new SetOnce<>(); + private final SetOnce meter = new SetOnce<>(); private final Settings settings; public S3RepositoryPlugin(Settings settings) { @@ -86,7 +88,7 @@ protected S3Repository createRepository( final BigArrays bigArrays, final RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service.get(), clusterService, bigArrays, recoverySettings); + return new S3Repository(metadata, registry, service.get(), clusterService, bigArrays, recoverySettings, meter.get()); } @Override @@ -108,6 +110,7 @@ public Collection createComponents( ) { 
service.set(s3Service(environment, clusterService.getSettings())); this.service.get().refreshAndClearCache(S3ClientSettings.load(settings)); + meter.set(telemetryProvider.getMeter()); return List.of(service); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index cc3cddda24917..2cdcc111b01a6 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -263,7 +264,7 @@ protected S3Repository createRepository( BigArrays bigArrays, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings) { + return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, Meter.NOOP) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index b0f443964e03a..a48fd2474bc59 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.blobstore.AbstractBlobContainerRetriesTestCase; +import org.elasticsearch.telemetry.metric.Meter; import org.hamcrest.Matcher; import org.junit.After; import org.junit.Before; @@ -156,7 +157,8 @@ protected BlobContainer createBlobContainer( S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), repositoryMetadata, BigArrays.NON_RECYCLING_INSTANCE, - new DeterministicTaskQueue().getThreadPool() + new DeterministicTaskQueue().getThreadPool(), + Meter.NOOP ) ) { @Override diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index aecccba2330f4..c38c8b764af41 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.hamcrest.Matchers; @@ -127,7 +128,8 @@ private S3Repository 
createS3Repo(RepositoryMetadata metadata) { new DummyS3Service(Mockito.mock(Environment.class)), BlobStoreTestUtil.mockClusterService(), MockBigArrays.NON_RECYCLING_INSTANCE, - new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + Meter.NOOP ) { @Override protected void assertSnapshotOrGenericThread() { diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java index c5ea99b0e5c14..c69270011fc7b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java @@ -16,6 +16,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.repositories.RepositoryInfo; import org.elasticsearch.repositories.RepositoryStatsSnapshot; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -23,6 +24,7 @@ public abstract class MeteredBlobStoreRepository extends BlobStoreRepository { private final RepositoryInfo repositoryInfo; + protected final Meter meter; public MeteredBlobStoreRepository( RepositoryMetadata metadata, @@ -31,9 +33,11 @@ public MeteredBlobStoreRepository( BigArrays bigArrays, RecoverySettings recoverySettings, BlobPath basePath, - Map location + Map location, + Meter meter ) { super(metadata, namedXContentRegistry, clusterService, bigArrays, recoverySettings, basePath); + this.meter = meter; ThreadPool threadPool = clusterService.getClusterApplierService().threadPool(); this.repositoryInfo = new RepositoryInfo( UUIDs.randomBase64UUID(), diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 95389b5632613..ba2dca8f1083e 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.telemetry.metric.Meter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -481,7 +482,8 @@ private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clust MockBigArrays.NON_RECYCLING_INSTANCE, mock(RecoverySettings.class), BlobPath.EMPTY, - Map.of("bucket", "bucket-a") + Map.of("bucket", "bucket-a"), + Meter.NOOP ); } @@ -508,7 +510,8 @@ private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clust MockBigArrays.NON_RECYCLING_INSTANCE, mock(RecoverySettings.class), BlobPath.EMPTY, - Map.of("bucket", "bucket-b") + Map.of("bucket", "bucket-b"), + Meter.NOOP ); } From 4384fa8519e009c59384ce23920f936254245f4c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 9 Oct 2023 12:07:24 +0200 Subject: [PATCH 064/176] Cleanup+Dry up Percentile Rank Aggregation Iterators (#100476) Just a random find from researching something else: * 
Percentile can be made a record, the equals method stays compatible * We had a duplicate iterator implementation, we can dry this one and the parent up and make it more efficient by using the mapped iterator utility --- .../metrics/HDRPercentileRanksIT.java | 16 ++++---- .../metrics/HDRPercentilesIT.java | 10 ++--- .../metrics/TDigestPercentileRanksIT.java | 12 +++--- .../metrics/TDigestPercentilesIT.java | 10 ++--- .../pipeline/PercentilesBucketIT.java | 10 ++--- .../common/collect/Iterators.java | 4 ++ .../metrics/ParsedHDRPercentileRanks.java | 18 --------- .../metrics/ParsedPercentileRanks.java | 9 +++++ .../metrics/ParsedPercentiles.java | 16 +------- .../metrics/ParsedTDigestPercentileRanks.java | 18 --------- .../aggregations/metrics/Percentile.java | 38 +------------------ .../support/AggregationInspectionHelper.java | 2 +- .../metrics/AbstractPercentilesTestCase.java | 2 +- .../HDRPercentileRanksAggregatorTests.java | 14 +++---- .../metrics/InternalHDRPercentilesTests.java | 6 +-- .../InternalPercentilesRanksTestCase.java | 2 +- .../metrics/InternalPercentilesTestCase.java | 2 +- .../InternalTDigestPercentilesTests.java | 6 +-- ...TDigestPercentileRanksAggregatorTests.java | 14 +++---- .../InternalPercentilesBucketTests.java | 10 ++--- ...regatedPercentileRanksAggregatorTests.java | 14 +++---- ...regatedPercentileRanksAggregatorTests.java | 10 ++--- .../evaluation/common/AbstractAucRoc.java | 4 +- .../AggregationToJsonProcessor.java | 2 +- .../aggregation/AggregationTestUtils.java | 4 +- .../pivot/AggregationResultUtils.java | 6 +-- 26 files changed, 93 insertions(+), 166 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index db13078c8a5a2..0bdd75f73e743 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -77,22 +77,22 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal assertEquals(pcts.length, percentileList.size()); for (int i = 0; i < pcts.length; ++i) { final Percentile percentile = percentileList.get(i); - assertThat(percentile.getValue(), equalTo(pcts[i])); - assertThat(percentile.getPercent(), greaterThanOrEqualTo(0.0)); - assertThat(percentile.getPercent(), lessThanOrEqualTo(100.0)); + assertThat(percentile.value(), equalTo(pcts[i])); + assertThat(percentile.percent(), greaterThanOrEqualTo(0.0)); + assertThat(percentile.percent(), lessThanOrEqualTo(100.0)); - if (percentile.getPercent() == 0) { + if (percentile.percent() == 0) { double allowedError = minValue / Math.pow(10, numberSigDigits); - assertThat(percentile.getValue(), lessThanOrEqualTo(minValue + allowedError)); + assertThat(percentile.value(), lessThanOrEqualTo(minValue + allowedError)); } - if (percentile.getPercent() == 100) { + if (percentile.percent() == 100) { double allowedError = maxValue / Math.pow(10, numberSigDigits); - assertThat(percentile.getValue(), greaterThanOrEqualTo(maxValue - allowedError)); + assertThat(percentile.value(), greaterThanOrEqualTo(maxValue - allowedError)); } } for (int i = 1; i < percentileList.size(); ++i) { - assertThat(percentileList.get(i).getValue(), greaterThanOrEqualTo(percentileList.get(i - 1).getValue())); + assertThat(percentileList.get(i).value(), 
greaterThanOrEqualTo(percentileList.get(i - 1).value())); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index e6bda97cd6918..58a22a32ae13b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -80,22 +80,22 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa assertEquals(pcts.length, percentileList.size()); for (int i = 0; i < pcts.length; ++i) { final Percentile percentile = percentileList.get(i); - assertThat(percentile.getPercent(), equalTo(pcts[i])); - double value = percentile.getValue(); + assertThat(percentile.percent(), equalTo(pcts[i])); + double value = percentile.value(); double allowedError = value / Math.pow(10, numberSigDigits); assertThat(value, greaterThanOrEqualTo(minValue - allowedError)); assertThat(value, lessThanOrEqualTo(maxValue + allowedError)); - if (percentile.getPercent() == 0) { + if (percentile.percent() == 0) { assertThat(value, closeTo(minValue, allowedError)); } - if (percentile.getPercent() == 100) { + if (percentile.percent() == 100) { assertThat(value, closeTo(maxValue, allowedError)); } } for (int i = 1; i < percentileList.size(); ++i) { - assertThat(percentileList.get(i).getValue(), greaterThanOrEqualTo(percentileList.get(i - 1).getValue())); + assertThat(percentileList.get(i).value(), greaterThanOrEqualTo(percentileList.get(i - 1).value())); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index fc1e9028ba91a..6909bd719f6dd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -80,17 +80,17 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal assertEquals(pcts.length, percentileList.size()); for (int i = 0; i < pcts.length; ++i) { final Percentile percentile = percentileList.get(i); - assertThat(percentile.getValue(), equalTo(pcts[i])); - assertThat(percentile.getPercent(), greaterThanOrEqualTo(0.0)); - assertThat(percentile.getPercent(), lessThanOrEqualTo(100.0)); + assertThat(percentile.value(), equalTo(pcts[i])); + assertThat(percentile.percent(), greaterThanOrEqualTo(0.0)); + assertThat(percentile.percent(), lessThanOrEqualTo(100.0)); - if (percentile.getPercent() == 0) { - assertThat(percentile.getValue(), lessThanOrEqualTo((double) minValue)); + if (percentile.percent() == 0) { + assertThat(percentile.value(), lessThanOrEqualTo((double) minValue)); } } for (int i = 1; i < percentileList.size(); ++i) { - assertThat(percentileList.get(i).getValue(), greaterThanOrEqualTo(percentileList.get(i - 1).getValue())); + assertThat(percentileList.get(i).value(), greaterThanOrEqualTo(percentileList.get(i - 1).value())); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index 
05e67ad61a71d..3fc5bf9863256 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -82,21 +82,21 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa assertEquals(pcts.length, percentileList.size()); for (int i = 0; i < pcts.length; ++i) { final Percentile percentile = percentileList.get(i); - assertThat(percentile.getPercent(), equalTo(pcts[i])); - double value = percentile.getValue(); + assertThat(percentile.percent(), equalTo(pcts[i])); + double value = percentile.value(); assertThat(value, greaterThanOrEqualTo((double) minValue)); assertThat(value, lessThanOrEqualTo((double) maxValue)); - if (percentile.getPercent() == 0) { + if (percentile.percent() == 0) { assertThat(value, equalTo((double) minValue)); } - if (percentile.getPercent() == 100) { + if (percentile.percent() == 100) { assertThat(value, equalTo((double) maxValue)); } } for (int i = 1; i < percentileList.size(); ++i) { - assertThat(percentileList.get(i).getValue(), greaterThanOrEqualTo(percentileList.get(i - 1).getValue())); + assertThat(percentileList.get(i).value(), greaterThanOrEqualTo(percentileList.get(i - 1).value())); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index 839d4321c7eb0..53ebbfc0bb016 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -257,12 +257,12 @@ public void testNestedWithDecimal() throws Exception { private void assertPercentileBucket(double[] values, PercentilesBucket percentiles) { for (Percentile percentile : percentiles) { - assertEquals(percentiles.percentile(percentile.getPercent()), percentile.getValue(), 0d); + assertEquals(percentiles.percentile(percentile.percent()), percentile.value(), 0d); if (values.length == 0) { - assertThat(percentile.getValue(), equalTo(Double.NaN)); + assertThat(percentile.value(), equalTo(Double.NaN)); } else { - int index = (int) Math.round((percentile.getPercent() / 100.0) * (values.length - 1)); - assertThat(percentile.getValue(), equalTo(values[index])); + int index = (int) Math.round((percentile.percent() / 100.0) * (values.length - 1)); + assertThat(percentile.value(), equalTo(values[index])); } } } @@ -271,7 +271,7 @@ private void assertPercentileBucket(double[] percents, double[] values, Percenti Iterator it = percentiles.iterator(); for (int i = 0; i < percents.length; ++i) { assertTrue(it.hasNext()); - assertEquals(percents[i], it.next().getPercent(), 0d); + assertEquals(percents[i], it.next().percent(), 0d); } assertFalse(it.hasNext()); assertPercentileBucket(values, percentiles); diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index ea6256a5b0717..d7c63edac2c94 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -153,8 +153,12 @@ public T next() { } } + @SuppressWarnings({ "rawtypes", "unchecked" }) public static Iterator map(Iterator input, Function fn) { if 
(input.hasNext()) { + if (input instanceof MapIterator mapIterator) { + return new MapIterator<>(mapIterator.input, mapIterator.fn.andThen(fn)); + } return new MapIterator<>(input, fn); } else { return Collections.emptyIterator(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentileRanks.java index 593df3b032cee..22834ca9b2bdc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedHDRPercentileRanks.java @@ -12,7 +12,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Iterator; public class ParsedHDRPercentileRanks extends ParsedPercentileRanks { @@ -21,23 +20,6 @@ public String getType() { return InternalHDRPercentileRanks.NAME; } - @Override - public Iterator iterator() { - final Iterator iterator = super.iterator(); - return new Iterator() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Percentile next() { - Percentile percentile = iterator.next(); - return new Percentile(percentile.getValue(), percentile.getPercent()); - } - }; - } - private static final ObjectParser PARSER = new ObjectParser<>( ParsedHDRPercentileRanks.class.getSimpleName(), true, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java index 9d70997b8f557..0bf317c36be16 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentileRanks.java @@ -8,6 +8,10 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.common.collect.Iterators; + +import java.util.Iterator; + abstract class ParsedPercentileRanks extends ParsedPercentiles implements PercentileRanks { @Override @@ -29,4 +33,9 @@ public double value(String name) { public Iterable valueNames() { return percentiles.keySet().stream().map(d -> d.toString()).toList(); } + + @Override + public Iterator iterator() { + return Iterators.map(super.iterator(), percentile -> new Percentile(percentile.value(), percentile.percent())); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java index 91c7b1e3224ac..d1b0f03904ef9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedPercentiles.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.XContentBuilder; @@ -59,20 +60,7 @@ void setKeyed(boolean keyed) { @Override public Iterator iterator() { - return new Iterator() { - final Iterator> iterator = percentiles.entrySet().iterator(); - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Percentile next() { - Map.Entry next = iterator.next(); - return new 
Percentile(next.getKey(), next.getValue()); - } - }; + return Iterators.map(percentiles.entrySet().iterator(), next -> new Percentile(next.getKey(), next.getValue())); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentileRanks.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentileRanks.java index d2ed1630578d4..29858a430de89 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentileRanks.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ParsedTDigestPercentileRanks.java @@ -12,7 +12,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Iterator; public class ParsedTDigestPercentileRanks extends ParsedPercentileRanks { @@ -21,23 +20,6 @@ public String getType() { return InternalTDigestPercentileRanks.NAME; } - @Override - public Iterator iterator() { - final Iterator iterator = super.iterator(); - return new Iterator() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Percentile next() { - Percentile percentile = iterator.next(); - return new Percentile(percentile.getValue(), percentile.getPercent()); - } - }; - } - private static final ObjectParser PARSER = new ObjectParser<>( ParsedTDigestPercentileRanks.class.getSimpleName(), true, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentile.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentile.java index 2cedf128b4ab6..2cb3e0be7e5dd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentile.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/Percentile.java @@ -8,40 +8,4 @@ package org.elasticsearch.search.aggregations.metrics; -import java.util.Objects; - -public class Percentile { - - private final double percent; - private final double value; - - public Percentile(double percent, double value) { - this.percent = percent; - this.value = value; - } - - public double getPercent() { - return percent; - } - - public double getValue() { - return value; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Percentile that = (Percentile) o; - return Double.compare(that.percent, percent) == 0 && Double.compare(that.value, value) == 0; - } - - @Override - public int hashCode() { - return Objects.hash(percent, value); - } -} +public record Percentile(double percent, double value) {} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java index 96e8408387d19..0e1a9d748db9c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java @@ -224,7 +224,7 @@ public static boolean hasValue(InternalBucketMetricValue agg) { } public static boolean hasValue(InternalPercentilesBucket agg) { - return StreamSupport.stream(agg.spliterator(), false).allMatch(p -> Double.isNaN(p.getValue())) == false; + return StreamSupport.stream(agg.spliterator(), false).allMatch(p -> Double.isNaN(p.value())) == false; } } diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesTestCase.java index d5867c8ebefcd..742ebac25f855 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesTestCase.java @@ -110,7 +110,7 @@ public void testEmptyRanksXContent() throws IOException { T agg = createTestInstance("test", Collections.emptyMap(), keyed, docValueFormat, percents, new double[0], false); for (Percentile percentile : agg) { - Double value = percentile.getValue(); + Double value = percentile.value(); assertPercentile(agg, value); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java index 9d200d8c2cf5a..b5b59f4c3327c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java @@ -68,15 +68,15 @@ public void testSimple() throws IOException { PercentileRanks ranks = searchAndReduce(reader, new AggTestConfig(aggBuilder, fieldType)); Iterator rankIterator = ranks.iterator(); Percentile rank = rankIterator.next(); - assertEquals(0.1, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.equalTo(0d)); + assertEquals(0.1, rank.value(), 0d); + assertThat(rank.percent(), Matchers.equalTo(0d)); rank = rankIterator.next(); - assertEquals(0.5, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.greaterThan(0d)); - assertThat(rank.getPercent(), Matchers.lessThan(100d)); + assertEquals(0.5, rank.value(), 0d); + assertThat(rank.percent(), Matchers.greaterThan(0d)); + assertThat(rank.percent(), Matchers.lessThan(100d)); rank = rankIterator.next(); - assertEquals(12, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.equalTo(100d)); + assertEquals(12, rank.value(), 0d); + assertThat(rank.percent(), Matchers.equalTo(100d)); assertFalse(rankIterator.hasNext()); assertTrue(AggregationInspectionHelper.hasValue((InternalHDRPercentileRanks) ranks)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesTests.java index 1c9cb88ee3e2b..e5580f7cdca68 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalHDRPercentilesTests.java @@ -101,10 +101,10 @@ public void testIterator() { String percentileName = nameIterator.next(); assertEquals(percent, Double.valueOf(percentileName), 0.0d); - assertEquals(percent, percentile.getPercent(), 0.0d); + assertEquals(percent, percentile.percent(), 0.0d); - assertEquals(aggregation.percentile(percent), percentile.getValue(), 0.0d); - assertEquals(aggregation.value(String.valueOf(percent)), percentile.getValue(), 0.0d); + assertEquals(aggregation.percentile(percent), percentile.value(), 0.0d); + assertEquals(aggregation.value(String.valueOf(percent)), percentile.value(), 0.0d); } assertFalse(iterator.hasNext()); assertFalse(nameIterator.hasNext()); diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesRanksTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesRanksTestCase.java index 2356e532ab534..86b261ac0eb7f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesRanksTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesRanksTestCase.java @@ -22,7 +22,7 @@ protected final void assertFromXContent(T aggregation, ParsedAggregation parsedA PercentileRanks parsedPercentileRanks = (PercentileRanks) parsedAggregation; for (Percentile percentile : aggregation) { - Double value = percentile.getValue(); + Double value = percentile.value(); assertEquals(aggregation.percent(value), parsedPercentileRanks.percent(value), 0); assertEquals(aggregation.percentAsString(value), parsedPercentileRanks.percentAsString(value)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesTestCase.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesTestCase.java index f8dfd8be13c64..50edd9212c192 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalPercentilesTestCase.java @@ -23,7 +23,7 @@ protected final void assertFromXContent(T aggregation, ParsedAggregation parsedA Percentiles parsedPercentiles = (Percentiles) parsedAggregation; for (Percentile percentile : aggregation) { - Double percent = percentile.getPercent(); + Double percent = percentile.percent(); assertEquals(aggregation.percentile(percent), parsedPercentiles.percentile(percent), 0); assertEquals(aggregation.percentileAsString(percent), parsedPercentiles.percentileAsString(percent)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesTests.java index 75f3ee1d918a1..4927ce9be3abb 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTDigestPercentilesTests.java @@ -142,10 +142,10 @@ public void testIterator() { String percentileName = nameIterator.next(); assertEquals(percent, Double.valueOf(percentileName), 0.0d); - assertEquals(percent, percentile.getPercent(), 0.0d); + assertEquals(percent, percentile.percent(), 0.0d); - assertEquals(aggregation.percentile(percent), percentile.getValue(), 0.0d); - assertEquals(aggregation.value(String.valueOf(percent)), percentile.getValue(), 0.0d); + assertEquals(aggregation.percentile(percent), percentile.value(), 0.0d); + assertEquals(aggregation.value(String.valueOf(percent)), percentile.value(), 0.0d); } assertFalse(iterator.hasNext()); assertFalse(nameIterator.hasNext()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java index 04f316ae15452..cbf9a83c6239d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java @@ -55,8 +55,8 @@ public void testEmpty() throws IOException { try (IndexReader reader = new MultiReader()) { PercentileRanks ranks = searchAndReduce(reader, new AggTestConfig(aggBuilder, fieldType)); Percentile rank = ranks.iterator().next(); - assertEquals(Double.NaN, rank.getPercent(), 0d); - assertEquals(0.5, rank.getValue(), 0d); + assertEquals(Double.NaN, rank.percent(), 0d); + assertEquals(0.5, rank.value(), 0d); assertFalse(AggregationInspectionHelper.hasValue(((InternalTDigestPercentileRanks) ranks))); } } @@ -77,16 +77,16 @@ public void testSimple() throws IOException { PercentileRanks ranks = searchAndReduce(reader, new AggTestConfig(aggBuilder, fieldType)); Iterator rankIterator = ranks.iterator(); Percentile rank = rankIterator.next(); - assertEquals(0.1, rank.getValue(), 0d); + assertEquals(0.1, rank.value(), 0d); // TODO: Fix T-Digest: this assertion should pass but we currently get ~15 // https://github.com/elastic/elasticsearch/issues/14851 // assertThat(rank.getPercent(), Matchers.equalTo(0d)); rank = rankIterator.next(); - assertEquals(0.5, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.greaterThan(0d)); - assertThat(rank.getPercent(), Matchers.lessThan(100d)); + assertEquals(0.5, rank.value(), 0d); + assertThat(rank.percent(), Matchers.greaterThan(0d)); + assertThat(rank.percent(), Matchers.lessThan(100d)); rank = rankIterator.next(); - assertEquals(12, rank.getValue(), 0d); + assertEquals(12, rank.value(), 0d); // TODO: Fix T-Digest: this assertion should pass but we currently get ~59 // https://github.com/elastic/elasticsearch/issues/14851 // assertThat(rank.getPercent(), Matchers.equalTo(100d)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java index 4145beba1e41d..98f957241629e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalPercentilesBucketTests.java @@ -79,7 +79,7 @@ protected final void assertFromXContent(InternalPercentilesBucket aggregation, P ParsedPercentilesBucket parsedPercentiles = (ParsedPercentilesBucket) parsedAggregation; for (Percentile percentile : aggregation) { - Double percent = percentile.getPercent(); + Double percent = percentile.percent(); assertEquals(aggregation.percentile(percent), parsedPercentiles.percentile(percent), 0); // we cannot ensure we get the same as_string output for Double.NaN values since they are rendered as // null and we don't have a formatted string representation in the rest output @@ -104,10 +104,10 @@ public void testPercentOrder() { Percentile percentile = iterator.next(); String percentileName = nameIterator.next(); - assertEquals(percent, percentile.getPercent(), 0.0d); + assertEquals(percent, percentile.percent(), 0.0d); assertEquals(percent, Double.valueOf(percentileName), 0.0d); - assertEquals(aggregation.percentile(percent), percentile.getValue(), 0.0d); + assertEquals(aggregation.percentile(percent), percentile.value(), 0.0d); } assertFalse(iterator.hasNext()); assertFalse(nameIterator.hasNext()); @@ -220,7 +220,7 @@ protected InternalPercentilesBucket mutateInstance(InternalPercentilesBucket ins private double[] extractPercentiles(InternalPercentilesBucket instance) 
{ List values = new ArrayList<>(); - instance.iterator().forEachRemaining(percentile -> values.add(percentile.getValue())); + instance.iterator().forEachRemaining(percentile -> values.add(percentile.value())); double[] valuesArray = new double[values.size()]; for (int i = 0; i < values.size(); i++) { valuesArray[i] = values.get(i); @@ -230,7 +230,7 @@ private double[] extractPercentiles(InternalPercentilesBucket instance) { private double[] extractPercents(InternalPercentilesBucket instance) { List percents = new ArrayList<>(); - instance.iterator().forEachRemaining(percentile -> percents.add(percentile.getPercent())); + instance.iterator().forEachRemaining(percentile -> percents.add(percentile.percent())); double[] percentArray = new double[percents.size()]; for (int i = 0; i < percents.size(); i++) { percentArray[i] = percents.get(i); diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HDRPreAggregatedPercentileRanksAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HDRPreAggregatedPercentileRanksAggregatorTests.java index 7f9992fe5fd2e..8fce6c145bc22 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HDRPreAggregatedPercentileRanksAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/HDRPreAggregatedPercentileRanksAggregatorTests.java @@ -93,15 +93,15 @@ public void testSimple() throws IOException { PercentileRanks ranks = searchAndReduce(reader, new AggTestConfig(aggBuilder, fieldType)); Iterator rankIterator = ranks.iterator(); Percentile rank = rankIterator.next(); - assertEquals(0.1, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.equalTo(0d)); + assertEquals(0.1, rank.value(), 0d); + assertThat(rank.percent(), Matchers.equalTo(0d)); rank = rankIterator.next(); - assertEquals(0.5, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.greaterThan(0d)); - assertThat(rank.getPercent(), Matchers.lessThan(100d)); + assertEquals(0.5, rank.value(), 0d); + assertThat(rank.percent(), Matchers.greaterThan(0d)); + assertThat(rank.percent(), Matchers.lessThan(100d)); rank = rankIterator.next(); - assertEquals(12, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.equalTo(100d)); + assertEquals(12, rank.value(), 0d); + assertThat(rank.percent(), Matchers.equalTo(100d)); assertFalse(rankIterator.hasNext()); assertTrue(AggregationInspectionHelper.hasValue((InternalHDRPercentileRanks) ranks)); } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/TDigestPreAggregatedPercentileRanksAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/TDigestPreAggregatedPercentileRanksAggregatorTests.java index c721bc088188d..6c207e837e85c 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/TDigestPreAggregatedPercentileRanksAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/aggregations/metrics/TDigestPreAggregatedPercentileRanksAggregatorTests.java @@ -81,16 +81,16 @@ public void testSimple() throws IOException { PercentileRanks ranks = searchAndReduce(reader, new AggTestConfig(aggBuilder, fieldType)); Iterator rankIterator = ranks.iterator(); Percentile rank = rankIterator.next(); - assertEquals(0.1, rank.getValue(), 0d); + 
assertEquals(0.1, rank.value(), 0d); // TODO: Fix T-Digest: this assertion should pass but we currently get ~15 // https://github.com/elastic/elasticsearch/issues/14851 // assertThat(rank.getPercent(), Matchers.equalTo(0d)); rank = rankIterator.next(); - assertEquals(0.5, rank.getValue(), 0d); - assertThat(rank.getPercent(), Matchers.greaterThan(0d)); - assertThat(rank.getPercent(), Matchers.lessThan(100d)); + assertEquals(0.5, rank.value(), 0d); + assertThat(rank.percent(), Matchers.greaterThan(0d)); + assertThat(rank.percent(), Matchers.lessThan(100d)); rank = rankIterator.next(); - assertEquals(12, rank.getValue(), 0d); + assertEquals(12, rank.value(), 0d); // TODO: Fix T-Digest: this assertion should pass but we currently get ~59 // https://github.com/elastic/elasticsearch/issues/14851 // assertThat(rank.getPercent(), Matchers.equalTo(100d)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java index a43cf3da4bbae..94b249754cb5c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java @@ -58,13 +58,13 @@ public String getName() { protected static double[] percentilesArray(Percentiles percentiles) { double[] result = new double[99]; percentiles.forEach(percentile -> { - if (Double.isNaN(percentile.getValue())) { + if (Double.isNaN(percentile.value())) { throw ExceptionsHelper.badRequestException( "[{}] requires at all the percentiles values to be finite numbers", NAME.getPreferredName() ); } - result[((int) percentile.getPercent()) - 1] = percentile.getValue(); + result[((int) percentile.percent()) - 1] = percentile.value(); }); return result; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java index ea70418a0c6a3..612860efee549 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java @@ -408,7 +408,7 @@ private boolean processGeoCentroid(GeoCentroid agg) { private boolean processPercentiles(Percentiles percentiles) { Iterator percentileIterator = percentiles.iterator(); - boolean aggregationAdded = addMetricIfFinite(percentiles.getName(), percentileIterator.next().getValue()); + boolean aggregationAdded = addMetricIfFinite(percentiles.getName(), percentileIterator.next().value()); if (percentileIterator.hasNext()) { throw new IllegalArgumentException("Multi-percentile aggregation [" + percentiles.getName() + "] is not supported"); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java index fff94087d9def..d95f8b8f5e52a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationTestUtils.java @@ -158,9 +158,7 @@ static Percentiles createPercentiles(String name, double... values) { when(percentiles.getName()).thenReturn(name); List percentileList = new ArrayList<>(); for (double value : values) { - Percentile percentile = mock(Percentile.class); - when(percentile.getValue()).thenReturn(value); - percentileList.add(percentile); + percentileList.add(new Percentile(0.0, value)); } when(percentiles.iterator()).thenReturn(percentileList.iterator()); return percentiles; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java index 4b64668f0268b..ce8a3d33ce42a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java @@ -330,10 +330,10 @@ public Object value(Aggregation agg, Map fieldTypeMap, String lo for (Percentile p : aggregation) { // in case of sparse data percentiles might not have data, in this case it returns NaN, // we need to guard the output and set null in this case - if (Numbers.isValidDouble(p.getValue()) == false) { - percentiles.put(OutputFieldNameConverter.fromDouble(p.getPercent()), null); + if (Numbers.isValidDouble(p.value()) == false) { + percentiles.put(OutputFieldNameConverter.fromDouble(p.percent()), null); } else { - percentiles.put(OutputFieldNameConverter.fromDouble(p.getPercent()), p.getValue()); + percentiles.put(OutputFieldNameConverter.fromDouble(p.percent()), p.value()); } } From 8e074712649b0326b2c5f046ba1bd6968d7d06cc Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 9 Oct 2023 12:09:35 +0200 Subject: [PATCH 065/176] Cleanup manually created empty iterators (#100484) No need for these, the JDK has an empty iterator built in. 
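As a quick standalone illustration (a sketch, not part of the patch; the class and method names are invented), the hand-rolled shape removed in the hunks below reduces to the JDK built-in:

```java
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;

public class EmptyIteratorSketch {
    // The shape this commit removes: an anonymous Iterator that always
    // reports no elements and throws on next().
    static Iterator<String> handRolledEmpty() {
        return new Iterator<>() {
            @Override
            public boolean hasNext() {
                return false;
            }

            @Override
            public String next() {
                throw new NoSuchElementException();
            }
        };
    }

    // The replacement: the JDK's built-in empty iterator.
    static Iterator<String> jdkEmpty() {
        return Collections.emptyIterator();
    }

    public static void main(String[] args) {
        System.out.println(handRolledEmpty().hasNext()); // false
        System.out.println(jdkEmpty().hasNext());        // false
        // emptyIterator() also avoids an allocation per call: it hands back
        // a shared immutable instance.
        System.out.println(Collections.emptyIterator() == Collections.emptyIterator()); // true
    }
}
```

One behavioral nuance worth checking when making this swap: `Collections.emptyIterator()` throws `NoSuchElementException` from `next()`, so a call site whose hand-rolled iterator returned `null` instead (as `FsBlobContainer`'s did) sees a small behavior change on misuse.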
--- .../action/bulk/BackoffPolicy.java | 13 ++------ .../common/blobstore/fs/FsBlobContainer.java | 13 ++------ .../script/field/EmptyField.java | 14 ++------ .../common/collect/IteratorsTests.java | 33 +++++++------------ .../search/action/AsyncSearchResponse.java | 3 +- 5 files changed, 20 insertions(+), 56 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java index 6998c28ac89fd..5c23a84d93963 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java @@ -9,6 +9,7 @@ import org.elasticsearch.core.TimeValue; +import java.util.Collections; import java.util.Iterator; import java.util.NoSuchElementException; @@ -96,17 +97,7 @@ private static TimeValue checkDelay(TimeValue delay) { private static class NoBackoff extends BackoffPolicy { @Override public Iterator iterator() { - return new Iterator() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public TimeValue next() { - throw new NoSuchElementException("No backoff"); - } - }; + return Collections.emptyIterator(); } } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index f3857fe60b08d..9f2971e24cbf3 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -53,6 +53,7 @@ import java.nio.file.StandardOpenOption; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -138,17 +139,7 @@ private DirectoryStream newDirectoryStreamIfFound(String blobNamePrefix) t return new DirectoryStream<>() { @Override public Iterator iterator() { - return new Iterator<>() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public Path next() { - return null; - } - }; + return Collections.emptyIterator(); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/field/EmptyField.java b/server/src/main/java/org/elasticsearch/script/field/EmptyField.java index 2ca171acf6c40..7c59d45d878e7 100644 --- a/server/src/main/java/org/elasticsearch/script/field/EmptyField.java +++ b/server/src/main/java/org/elasticsearch/script/field/EmptyField.java @@ -8,8 +8,8 @@ package org.elasticsearch.script.field; +import java.util.Collections; import java.util.Iterator; -import java.util.NoSuchElementException; /** * A script {@code Field} with no mapping, always returns {@code defaultValue}. 
@@ -47,16 +47,6 @@ public Object get(int index, Object defaultValue) { @Override public Iterator iterator() { - return new Iterator<>() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public Object next() { - throw new NoSuchElementException(); - } - }; + return Collections.emptyIterator(); } } diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index 341ebea2a2a0c..eb1d5838c734b 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -37,12 +37,12 @@ public void testNoConcatenation() { } public void testEmptyConcatenation() { - Iterator iterator = Iterators.concat(empty()); + Iterator iterator = Iterators.concat(Collections.emptyIterator()); assertEmptyIterator(iterator); } public void testMultipleEmptyConcatenation() { - Iterator iterator = Iterators.concat(empty(), empty()); + Iterator iterator = Iterators.concat(Collections.emptyIterator(), Collections.emptyIterator()); assertEmptyIterator(iterator); } @@ -53,12 +53,12 @@ public void testSingleton() { public void testEmptyBeforeSingleton() { int value = randomInt(); - assertSingleton(value, empty(), singletonIterator(value)); + assertSingleton(value, Collections.emptyIterator(), singletonIterator(value)); } public void testEmptyAfterSingleton() { int value = randomInt(); - assertSingleton(value, singletonIterator(value), empty()); + assertSingleton(value, singletonIterator(value), Collections.emptyIterator()); } public void testRandomSingleton() { @@ -68,7 +68,7 @@ public void testRandomSingleton() { @SuppressWarnings({ "rawtypes", "unchecked" }) Iterator[] iterators = new Iterator[numberOfIterators]; for (int i = 0; i < numberOfIterators; i++) { - iterators[i] = i != singletonIndex ? empty() : singletonIterator(value); + iterators[i] = i != singletonIndex ? Collections.emptyIterator() : singletonIterator(value); } assertSingleton(value, iterators); } @@ -94,7 +94,12 @@ public void testRandomIterators() { public void testTwoEntries() { int first = randomInt(); int second = randomInt(); - Iterator concat = Iterators.concat(singletonIterator(first), empty(), empty(), singletonIterator(second)); + Iterator concat = Iterators.concat( + singletonIterator(first), + Collections.emptyIterator(), + Collections.emptyIterator(), + singletonIterator(second) + ); assertContainsInOrder(concat, first, second); } @@ -109,7 +114,7 @@ public void testNull() { public void testNullIterator() { try { - Iterators.concat(singletonIterator(1), empty(), null, empty(), singletonIterator(2)); + Iterators.concat(singletonIterator(1), Collections.emptyIterator(), null, Collections.emptyIterator(), singletonIterator(2)); fail("expected " + NullPointerException.class.getSimpleName()); } catch (NullPointerException e) { @@ -268,20 +273,6 @@ private void assertSingleton(T value, Iterator... iterators) { assertContainsInOrder(concat, value); } - private Iterator empty() { - return new Iterator() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public T next() { - throw new NoSuchElementException(); - } - }; - } - @SafeVarargs @SuppressWarnings({ "varargs" }) private void assertContainsInOrder(Iterator iterator, T... 
values) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java index ad74ea7488766..39a50a4011354 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.async.AsyncResponse; import java.io.IOException; +import java.util.Collections; import java.util.Iterator; import static org.elasticsearch.rest.RestStatus.OK; @@ -216,7 +217,7 @@ public Iterator toXContentChunked(ToXContent.Params params } return builder; }), - searchResponse == null ? Iterators.concat() : searchResponse.toXContentChunked(params), + searchResponse == null ? Collections.emptyIterator() : searchResponse.toXContentChunked(params), ChunkedToXContentHelper.singleChunk((builder, p) -> { if (error != null) { builder.startObject("error"); From c7b85758fb7420939b149ad57ac94efa5df5f206 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 9 Oct 2023 11:18:53 +0100 Subject: [PATCH 066/176] Fix testIndexCompatibilityChecks (#100321) This test was sometimes picking a v8.x version for the previous node, but such a node cannot have any indices incompatible with `CURRENT`. We've changed the logic for creating the downgrade advice message recently so that it defaults to v7.17.0 in this situation, and this commit updates the test to match. Closes #100319 --- .../elasticsearch/env/NodeEnvironmentTests.java | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 0573845a73db0..cfcaeb9d9d704 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -558,7 +558,6 @@ public void testBlocksDowngradeToVersionWithMultipleNodesInDataPath() throws IOE } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100319") public void testIndexCompatibilityChecks() throws IOException { final Settings settings = buildEnvSettings(Settings.EMPTY); @@ -600,7 +599,11 @@ public void testIndexCompatibilityChecks() throws IOException { allOf( containsString("Cannot start this node"), containsString("it holds metadata for indices with version [" + oldIndexVersion + "]"), - containsString("Revert this node to version [" + previousNodeVersion + "]") + containsString( + "Revert this node to version [" + + (previousNodeVersion.major == Version.V_8_0_0.major ? 
Version.V_7_17_0 : previousNodeVersion) + + "]" + ) ) ); @@ -621,8 +624,13 @@ public void testIndexCompatibilityChecks() throws IOException { () -> checkForIndexCompatibility(logger, env.dataPaths()) ); - assertThat(ex.getMessage(), startsWith("cannot upgrade a node from version [" + oldVersion + "] directly")); - assertThat(ex.getMessage(), containsString("upgrade to version [" + Build.current().minWireCompatVersion())); + assertThat( + ex.getMessage(), + allOf( + startsWith("cannot upgrade a node from version [" + oldVersion + "] directly"), + containsString("upgrade to version [" + Build.current().minWireCompatVersion()) + ) + ); } } From 98a29e6f80b713431d67b12f0e1d72b48a6c9ad2 Mon Sep 17 00:00:00 2001 From: skyguard1 Date: Mon, 9 Oct 2023 05:54:12 -0500 Subject: [PATCH 067/176] Fix the issue that PersistentTask.getState() return value may cause NullPointerException (#98061) --- docs/changelog/98061.yaml | 6 ++++++ .../TransportGetDataFrameAnalyticsStatsAction.java | 2 +- .../xpack/transform/TransformUsageTransportAction.java | 9 +++++---- .../transform/action/TransportStartTransformAction.java | 2 +- 4 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/98061.yaml diff --git a/docs/changelog/98061.yaml b/docs/changelog/98061.yaml new file mode 100644 index 0000000000000..3955b262017f0 --- /dev/null +++ b/docs/changelog/98061.yaml @@ -0,0 +1,6 @@ +pr: 98061 +summary: Fix possible NPE when getting transform stats for failed transforms +area: Transform +type: bug +issues: + - 98052 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java index 0f3d27cc196ed..c0c4a5cb5b3b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java @@ -370,7 +370,7 @@ private GetDataFrameAnalyticsStatsAction.Response.Stats buildStats( String failureReason = null; if (analyticsState == DataFrameAnalyticsState.FAILED) { DataFrameAnalyticsTaskState taskState = (DataFrameAnalyticsTaskState) analyticsTask.getState(); - failureReason = taskState.getReason(); + failureReason = taskState != null ? 
taskState.getReason() : null; } DiscoveryNode node = null; String assignmentExplanation = null; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java index 76db72cdd3908..2f3ed29ea08fc 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java @@ -48,6 +48,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Optional; import java.util.stream.Stream; import static java.util.stream.Collectors.toMap; @@ -103,10 +104,10 @@ protected void masterOperation( final Map transformsCountByState = new HashMap<>(); for (PersistentTasksCustomMetadata.PersistentTask transformTask : transformTasks) { TransformState transformState = (TransformState) transformTask.getState(); - TransformTaskState taskState = transformState.getTaskState(); - if (taskState != null) { - transformsCountByState.merge(taskState.value(), 1L, Long::sum); - } + Optional.ofNullable(transformState) + .map(TransformState::getTaskState) + .map(TransformTaskState::value) + .ifPresent(value -> transformsCountByState.merge(value, 1L, Long::sum)); } final SetOnce> transformsCountByFeature = new SetOnce<>(); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index 13423de7d3a85..db24470433003 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -163,7 +163,7 @@ protected void masterOperation( ); } else { TransformState transformState = (TransformState) existingTask.getState(); - if (transformState.getTaskState() == TransformTaskState.FAILED) { + if (transformState != null && transformState.getTaskState() == TransformTaskState.FAILED) { listener.onFailure( new ElasticsearchStatusException( TransformMessages.getMessage(CANNOT_START_FAILED_TRANSFORM, request.getId(), transformState.getReason()), From c956eec2bb8cc8f0eeab16d4c6d1eaf278226331 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 9 Oct 2023 12:08:54 +0100 Subject: [PATCH 068/176] Mute failing MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests/testSimpleFinishClose (#100499) Test `MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests/testSimpleFinishClose` is failing with ``` org.elasticsearch.compute.aggregation.MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests > testSimpleFinishClose FAILED java.lang.AssertionError: 2 at org.elasticsearch.compute.operator.OperatorTestCase.testSimpleFinishClose(OperatorTestCase.java:224) at java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:104) at java.base/java.lang.reflect.Method.invoke(Method.java:578) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1758) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:946) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:982) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:996)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
```

Mute it.

Relates #100496
---
 .../org/elasticsearch/compute/operator/OperatorTestCase.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java
index 6de20e597e580..2022375c8c774 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java
@@ -218,6 +218,7 @@ protected final void assertSimple(DriverContext context, int size) {
     }

     // Tests that finish then close without calling getOutput to retrieve a potential last page, releases all memory
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100496")
     public void testSimpleFinishClose() {
         DriverContext driverContext = driverContext();
         List<Page> input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), 1));

From 2cfdb7a92d771451eb32788f40852ed54d5c6192 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Mon, 9 Oct 2023 22:18:54 +1100
Subject: [PATCH 069/176] Log a debug-level message for deleting a non-existent snapshot (#100479)

The new message makes it easier to pair the deletion attempt with the "deleting snapshots" log message at info level.

---
 .../java/org/elasticsearch/snapshots/SnapshotsService.java | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
index a4ce775a1c2bd..0152caaea13fe 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -1931,7 +1931,9 @@ public ClusterState execute(ClusterState currentState) {
                 final SnapshotId foundId = snapshotsIdsInRepository.get(snapshotOrPattern);
                 if (foundId == null) {
                     if (snapshotIds.stream().noneMatch(snapshotId -> snapshotId.getName().equals(snapshotOrPattern))) {
-                        throw new SnapshotMissingException(repositoryName, snapshotOrPattern);
+                        final var snapshotMissingException = new SnapshotMissingException(repositoryName, snapshotOrPattern);
+                        logger.debug(snapshotMissingException.getMessage());
+                        throw snapshotMissingException;
                     }
                 } else {
                     snapshotIds.add(foundId);
@@ -2353,6 +2355,7 @@ public void onFailure(Exception e) {
             @Override
             public void onFailure(Exception e) {
+                logger.debug(() -> "failed to complete snapshot deletion [" + deleteEntry + "]", e);
                 submitUnbatchedTask(
                     "remove snapshot deletion metadata after failed delete",
                     new RemoveSnapshotDeletionAndContinueTask(deleteEntry, repositoryData) {

From c879775528d432c06b4da84ffe21f019057e2ec6 Mon Sep 17 00:00:00 2001
From: Bogdan Pintea
Date: Mon, 9 Oct 2023 13:45:16 +0200
Subject: [PATCH 070/176] ESQL: Limit how many bytes `concat()` can process (#100360)

This adds a limit on the byte length that a concatenated string can reach. The limit is set to 1MB (per entry). When the limit is hit, an exception is returned to the caller (similar to an accounting circuit breaker tripping) and execution is halted.

Related: #100288.
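As a rough, self-contained sketch of the fail-fast check this adds (illustration only: the class name is invented and `IllegalArgumentException` stands in for the `EsqlClientException` that the real change in `Concat.java` below throws with a 400 status):

```java
import java.nio.charset.StandardCharsets;

public class ConcatLimitSketch {
    // 1MB cap per concatenated entry, mirroring MAX_CONCAT_LENGTH in the patch.
    static final long MAX_CONCAT_LENGTH = 1024 * 1024;

    // Sum the byte lengths of the inputs and reject oversized results before
    // any output buffer is built. The long accumulator cannot overflow for
    // realistic input counts, unlike an int sum.
    static long checkedTotalLength(byte[][] values) {
        long length = 0;
        for (byte[] value : values) {
            length += value.length;
        }
        if (length > MAX_CONCAT_LENGTH) {
            throw new IllegalArgumentException("concatenating more than [" + MAX_CONCAT_LENGTH + "] bytes is not supported");
        }
        return length;
    }

    public static void main(String[] args) {
        byte[][] small = { "foo".getBytes(StandardCharsets.UTF_8), "bar".getBytes(StandardCharsets.UTF_8) };
        System.out.println(checkedTotalLength(small)); // 6

        byte[][] big = { new byte[1024 * 1024], new byte[1] }; // one byte over the cap
        try {
            checkedTotalLength(big);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // rejected before allocating the result
        }
    }
}
```

The check runs before the output is materialized, which is what lets the caller receive a clean rejection rather than tripping a memory circuit breaker mid-build.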
--- docs/changelog/100360.yaml | 5 +++ .../esql/qa/single_node/HeapAttackIT.java | 10 ++++- .../function/scalar/string/Concat.java | 23 +++++++++++ .../function/scalar/string/ConcatTests.java | 40 +++++++++++++++++++ 4 files changed, 76 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/100360.yaml diff --git a/docs/changelog/100360.yaml b/docs/changelog/100360.yaml new file mode 100644 index 0000000000000..6d0dcafe16a8f --- /dev/null +++ b/docs/changelog/100360.yaml @@ -0,0 +1,5 @@ +pr: 100360 +summary: "ESQL: Limit how many bytes `concat()` can process" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java index 6cedba3e4ee28..55b4454ce2105 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java @@ -152,10 +152,16 @@ public void testSmallConcat() throws IOException { assertMap(map, matchesMap().entry("columns", columns).entry("values", values)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") public void testHugeConcat() throws IOException { initSingleDocIndex(); - assertCircuitBreaks(() -> concat(10)); + ResponseException e = expectThrows(ResponseException.class, () -> concat(10)); + Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(e.getResponse().getEntity()), false); + logger.info("expected request rejected {}", map); + assertMap( + map, + matchesMap().entry("status", 400) + .entry("error", matchesMap().extraOk().entry("reason", "concatenating more than [1048576] bytes is not supported")) + ); } private Response concat(int evals) throws IOException { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java index ae805975646a6..c987513d5919e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java @@ -13,6 +13,8 @@ import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.esql.EsqlClientException; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expressions; @@ -27,6 +29,7 @@ import java.util.function.Function; import java.util.stream.Stream; +import static org.elasticsearch.common.unit.ByteSizeUnit.MB; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString; @@ -34,6 +37,9 @@ * Join strings. 
*/ public class Concat extends ScalarFunction implements EvaluatorMapper { + + static final long MAX_CONCAT_LENGTH = MB.toBytes(1); + public Concat(Source source, Expression first, List rest) { super(source, Stream.concat(Stream.of(first), rest.stream()).toList()); } @@ -83,6 +89,7 @@ public ExpressionEvaluator.Factory toEvaluator(Function MAX_CONCAT_LENGTH) { + throw new EsqlClientException("concatenating more than [" + MAX_CONCAT_LENGTH + "] bytes is not supported") { + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; // return a 400 response + } + }; + } + return length; + } + @Override public Expression replaceChildren(List newChildren) { return new Concat(source(), newChildren.get(0), newChildren.subList(1, newChildren.size())); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java index caec572351675..32e894e5282d5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java @@ -13,22 +13,26 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.EsqlClientException; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import java.util.ArrayList; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.function.Supplier; import java.util.stream.IntStream; import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class ConcatTests extends AbstractFunctionTestCase { public ConcatTests(@Name("TestCase") Supplier testCaseSupplier) { @@ -118,6 +122,17 @@ public void testSomeConstant() { assertThat(expression.typeResolved().message(), equalTo(testCase.getExpectedTypeError())); return; } + + int totalLength = testDataLength(); + if (totalLength >= Concat.MAX_CONCAT_LENGTH || rarely()) { + boolean hasNulls = mix.stream().anyMatch(x -> x instanceof Literal l && l.value() == null) + || fieldValues.stream().anyMatch(Objects::isNull); + if (hasNulls == false) { + testOversized(totalLength, mix, fieldValues); + return; + } + } + try ( EvalOperator.ExpressionEvaluator eval = evaluator(expression).get(driverContext()); Block.Ref ref = eval.eval(row(fieldValues)) @@ -125,4 +140,29 @@ public void testSomeConstant() { assertThat(toJavaObject(ref.block(), 0), testCase.getMatcher()); } } + + private void testOversized(int totalLen, List mix, List fieldValues) { + for (int len; totalLen < Concat.MAX_CONCAT_LENGTH; totalLen += len) { + len = randomIntBetween(1, (int) Concat.MAX_CONCAT_LENGTH); + mix.add(new Literal(Source.EMPTY, new BytesRef(randomAlphaOfLength(len)), DataTypes.KEYWORD)); + } + Expression expression = build(testCase.getSource(), mix); 
+ Exception e = expectThrows(EsqlClientException.class, () -> { + try ( + EvalOperator.ExpressionEvaluator eval = evaluator(expression).get(driverContext()); + Block.Ref ref = eval.eval(row(fieldValues)); + ) {} + }); + assertThat(e.getMessage(), is("concatenating more than [1048576] bytes is not supported")); + } + + private int testDataLength() { + int totalLength = 0; + for (var data : testCase.getData()) { + if (data.data() instanceof BytesRef bytesRef) { + totalLength += bytesRef.length; + } + } + return totalLength; + } } From 1fa816ee8c05712bc7e24c78d90d7a346fc70f1f Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Mon, 9 Oct 2023 13:46:00 +0200 Subject: [PATCH 071/176] Use toolchain in favor of custom jdk plugin for runtime jdk setup (#99922) * Use toolchain in favor of custom jdk plugin for runtime jdk setup * Remove logic for Passing runtime java version via JAVAX_HOME * Cleanup runtime provisioning gradle script --- ...elasticsearch.runtime-jdk-provision.gradle | 27 ++++++++----------- .../internal/info/GlobalBuildInfoPlugin.java | 24 +++++------------ .../ArchivedOracleJdkToolchainResolver.java | 4 +-- ...hivedOracleJdkToolchainResolverSpec.groovy | 6 +++++ gradle.properties | 1 - 5 files changed, 26 insertions(+), 36 deletions(-) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle index 8d68f77da1e97..7c7c05facb2e1 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle @@ -18,19 +18,15 @@ import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask // gradle has an open issue of failing applying plugins in // precompiled script plugins (see https://github.com/gradle/gradle/issues/17004) - configure(allprojects) { - apply plugin: 'elasticsearch.jdk-download' - - jdks { - provisioned_runtime { - vendor = VersionProperties.bundledJdkVendor - version = VersionProperties.bundledJdkVersion - platform = OS.current().name().toLowerCase() - architecture = Architecture.current().name().toLowerCase() - } + def launcher = javaToolchains.launcherFor { + languageVersion = JavaLanguageVersion.of(VersionProperties.bundledJdkMajorVersion) + vendor = VersionProperties.bundledJdkVendor == "openjdk" ? + JvmVendorSpec.ORACLE : + JvmVendorSpec.matching(VersionProperties.bundledJdkVendor) } - project.tasks.withType(Test).configureEach { Test test -> + + project.tasks.withType(Test).configureEach { Test test -> if (BuildParams.getIsRuntimeJavaHomeSet()) { test.executable = "${BuildParams.runtimeJavaHome}/bin/java" + (OS.current() == OS.WINDOWS ? 
'.exe' : '') @@ -47,7 +43,7 @@ configure(allprojects) { project.plugins.withId("elasticsearch.internal-testclusters") { internalPlugin -> if (BuildParams.getIsRuntimeJavaHomeSet() == false) { // If no runtime java home is set, use the bundled JDK for test clusters - testClustersPlugin.setRuntimeJava(providers.provider(() -> file("${project.jdks.provisioned_runtime.javaHomePath}"))) + testClustersPlugin.setRuntimeJava(launcher.map { it.metadata.installationPath.asFile }) } } } @@ -55,8 +51,7 @@ configure(allprojects) { project.plugins.withType(RestTestBasePlugin) { tasks.withType(StandaloneRestIntegTestTask).configureEach { if (BuildParams.getIsRuntimeJavaHomeSet() == false) { - dependsOn(project.jdks.provisioned_runtime) - nonInputProperties.systemProperty("tests.runtime.java", "${-> project.jdks.provisioned_runtime.javaHomePath}") + nonInputProperties.systemProperty("tests.runtime.java", "${-> launcher.map { it.metadata.installationPath.asFile.path }.get()}") } } } @@ -64,8 +59,8 @@ configure(allprojects) { project.plugins.withType(ThirdPartyAuditPrecommitPlugin) { project.getTasks().withType(ThirdPartyAuditTask.class).configureEach { if (BuildParams.getIsRuntimeJavaHomeSet() == false) { - javaHome.set(providers.provider(() -> "${project.jdks.provisioned_runtime.javaHomePath}")) - targetCompatibility.set(providers.provider(() -> JavaVersion.toVersion(project.jdks.provisioned_runtime.major))) + javaHome.set(launcher.map { it.metadata.installationPath.asFile.path }) + targetCompatibility.set(providers.provider(() -> JavaVersion.toVersion(launcher.map { it.metadata.javaRuntimeVersion }.get()))) } } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 8dfea22ae2f91..115c4b0694141 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -300,7 +300,7 @@ private File findRuntimeJavaHome() { String runtimeJavaProperty = System.getProperty("runtime.java"); if (runtimeJavaProperty != null) { - return new File(findJavaHome(runtimeJavaProperty)); + return resolveJavaHomeFromToolChainService(runtimeJavaProperty); } String env = System.getenv("RUNTIME_JAVA_HOME"); if (env != null) { @@ -311,12 +311,6 @@ private File findRuntimeJavaHome() { return env == null ? Jvm.current().getJavaHome() : new File(env); } - private String findJavaHome(String version) { - String javaHomeEnvVar = getJavaHomeEnvVarName(version); - String env = System.getenv(javaHomeEnvVar); - return env != null ? 
resolveJavaHomeFromEnvVariable(javaHomeEnvVar) : resolveJavaHomeFromToolChainService(version); - } - @NotNull private String resolveJavaHomeFromEnvVariable(String javaHomeEnvVar) { Provider javaHomeNames = providers.gradleProperty("org.gradle.java.installations.fromEnv"); @@ -348,17 +342,13 @@ private String resolveJavaHomeFromEnvVariable(String javaHomeEnvVar) { } @NotNull - private String resolveJavaHomeFromToolChainService(String version) { + private File resolveJavaHomeFromToolChainService(String version) { Property value = objectFactory.property(JavaLanguageVersion.class).value(JavaLanguageVersion.of(version)); - Provider javaLauncherProvider = toolChainService.launcherFor( - javaToolchainSpec -> javaToolchainSpec.getLanguageVersion().value(value) - ); - - try { - return javaLauncherProvider.get().getMetadata().getInstallationPath().getAsFile().getCanonicalPath(); - } catch (IOException e) { - throw new RuntimeException(e); - } + Provider javaLauncherProvider = toolChainService.launcherFor(javaToolchainSpec -> { + javaToolchainSpec.getLanguageVersion().value(value); + javaToolchainSpec.getVendor().set(JvmVendorSpec.ORACLE); + }); + return javaLauncherProvider.get().getMetadata().getInstallationPath().getAsFile(); } private static String getJavaHomeEnvVarName(String version) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java index 7965a18408798..b8cffae0189ce 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolver.java @@ -25,7 +25,7 @@ public abstract class ArchivedOracleJdkToolchainResolver extends AbstractCustomJavaToolchainResolver { - private static final Map ARCHIVED_BASE_VERSIONS = Maps.of(19, "19.0.2", 18, "18.0.2.1", 17, "17.0.7"); + private static final Map ARCHIVED_BASE_VERSIONS = Maps.of(20, "20.0.2", 19, "19.0.2", 18, "18.0.2.1", 17, "17.0.7"); @Override public Optional resolve(JavaToolchainRequest request) { @@ -54,6 +54,7 @@ public Optional resolve(JavaToolchainRequest request) { + arch + "_bin." 
+ extension + ) ); } @@ -78,5 +79,4 @@ private boolean requestIsSupported(JavaToolchainRequest request) { OperatingSystem operatingSystem = buildPlatform.getOperatingSystem(); return Architecture.AARCH64 != architecture || OperatingSystem.WINDOWS != operatingSystem; } - } diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy index 7ad97480e5f25..b7f08b6016679 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/toolchain/ArchivedOracleJdkToolchainResolverSpec.groovy @@ -23,6 +23,12 @@ class ArchivedOracleJdkToolchainResolverSpec extends AbstractToolchainResolverSp @Override def supportedRequests() { return [ + [20, ORACLE, MAC_OS, X86_64, "https://download.oracle.com/java/20/archive/jdk-20.0.2_macos-x64_bin.tar.gz"], + [20, ORACLE, MAC_OS, AARCH64, "https://download.oracle.com/java/20/archive/jdk-20.0.2_macos-aarch64_bin.tar.gz"], + [20, ORACLE, LINUX, X86_64, "https://download.oracle.com/java/20/archive/jdk-20.0.2_linux-x64_bin.tar.gz"], + [20, ORACLE, LINUX, AARCH64, "https://download.oracle.com/java/20/archive/jdk-20.0.2_linux-aarch64_bin.tar.gz"], + [20, ORACLE, WINDOWS, X86_64, "https://download.oracle.com/java/20/archive/jdk-20.0.2_windows-x64_bin.zip"], + [19, ORACLE, MAC_OS, X86_64, "https://download.oracle.com/java/19/archive/jdk-19.0.2_macos-x64_bin.tar.gz"], [19, ORACLE, MAC_OS, AARCH64, "https://download.oracle.com/java/19/archive/jdk-19.0.2_macos-aarch64_bin.tar.gz"], [19, ORACLE, LINUX, X86_64, "https://download.oracle.com/java/19/archive/jdk-19.0.2_linux-x64_bin.tar.gz"], diff --git a/gradle.properties b/gradle.properties index 5ca76ac1be8bd..7ad5f24829cc0 100644 --- a/gradle.properties +++ b/gradle.properties @@ -12,7 +12,6 @@ systemProp.jdk.tls.client.protocols=TLSv1.2 # java homes resolved by environment variables org.gradle.java.installations.auto-detect=false -org.gradle.java.installations.fromEnv=JAVA_TOOLCHAIN_HOME,JAVA_HOME,RUNTIME_JAVA_HOME,JAVA21_HOME,JAVA20_HOME,JAVA19_HOME,JAVA18_HOME,JAVA17_HOME,JAVA16_HOME,JAVA15_HOME,JAVA14_HOME,JAVA13_HOME,JAVA12_HOME,JAVA11_HOME,JAVA8_HOME # log some dependency verification info to console org.gradle.dependency.verification.console=verbose From 98756e0818be5fae46b84418f0602bcfa7717c7c Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 9 Oct 2023 14:30:37 +0200 Subject: [PATCH 072/176] [Profiling] Add backwards-compatibility test (#99730) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With this commit we add a basic backwards-compatibility test that checks whether indices and data streams can be properly upgraded to the latest version. 
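As a rough illustration (a hypothetical invocation, not part of this patch), the status endpoint that the new YAML tests below exercise can be polled like this:

    GET /_profiling/status?wait_for_resources_created=true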
Co-authored-by: Tim Rühsen --------- Co-authored-by: Tim Rühsen --- .../rest-api-spec/api/profiling.status.json | 37 +++++++++++++++++++ .../test/old_cluster/150_profiling.yml | 24 ++++++++++++ .../test/upgraded_cluster/150_profiling.yml | 18 +++++++++ 3 files changed, 79 insertions(+) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/profiling.status.json create mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/150_profiling.yml create mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/150_profiling.yml diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.status.json new file mode 100644 index 0000000000000..76296e1079be2 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.status.json @@ -0,0 +1,37 @@ +{ + "profiling.status":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/observability/current/universal-profiling.html", + "description":"Returns basic information about the status of Universal Profiling." + }, + "stability":"stable", + "visibility":"private", + "headers":{ + "accept": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_profiling/status", + "methods":[ + "GET" + ] + } + ] + }, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + }, + "wait_for_resources_created":{ + "type":"boolean", + "description":"Whether to return immediately or wait until resources have been created" + } + } + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/150_profiling.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/150_profiling.yml new file mode 100644 index 0000000000000..aae8eb2367f76 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/150_profiling.yml @@ -0,0 +1,24 @@ +--- +"Test Profiling Index Management creates indices": + - skip: + version: " - 8.10.99" + reason: status check API has been introduced with 8.11.0 + + - do: + profiling.status: + wait_for_resources_created: false + - is_false: 'resource_management.enabled' + - is_false: 'resources.created' + + - do: + cluster.put_settings: + body: + transient: + xpack.profiling.templates.enabled: "true" + flat_settings: true + + - do: + profiling.status: + wait_for_resources_created: true + - is_true: 'resource_management.enabled' + - is_true: 'resources.created' diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/150_profiling.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/150_profiling.yml new file mode 100644 index 0000000000000..0439ac4b0206c --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/150_profiling.yml @@ -0,0 +1,18 @@ +--- +"Test Profiling Index Management updates indices": + - skip: + version: " - 8.10.99" + reason: status check API has been introduced with 8.11.0 + + - do: + cluster.put_settings: + body: + transient: + xpack.profiling.templates.enabled: "true" + flat_settings: true + + - do: + profiling.status: + wait_for_resources_created: true + - is_true: 'resource_management.enabled' + - is_true: 'resources.created' From 8ff7dee0f00d5da2429c58f3fb5efb3bf06a8c4f Mon Sep 17 
00:00:00 2001 From: Valeriy Khakhutskyy <1292899+valeriy42@users.noreply.github.com> Date: Mon, 9 Oct 2023 15:01:11 +0200 Subject: [PATCH 073/176] [ML] Prevent resource over-subscription in model allocation planner (#100392) When rescaling or changing the number of nodes, the allocator tries to ensure that previously allocated model deployments always have at least one allocation available. However, we did not ensure that the Resource Tracker would always have a positive amount of memory on the nodes when taking into account the memory of the previously allocated deployments. In a particular scenario, when we downsize the nodes, this can lead to the overallocation of resources. This PR adds the missing assertion and hardens the unit tests to ensure that we don't use more resources than we have. --- docs/changelog/100392.yaml | 5 +++ .../planning/AbstractPreserveAllocations.java | 10 +++-- .../assignment/planning/AssignmentPlan.java | 3 ++ .../planning/AssignmentPlannerTests.java | 42 ++++++++++++++++++- 4 files changed, 55 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/100392.yaml diff --git a/docs/changelog/100392.yaml b/docs/changelog/100392.yaml new file mode 100644 index 0000000000000..ab693d5ae04ce --- /dev/null +++ b/docs/changelog/100392.yaml @@ -0,0 +1,5 @@ +pr: 100392 +summary: Prevent resource over-subscription in model allocation planner +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java index d0ae0341edc51..4843cc43d1187 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java @@ -80,10 +80,12 @@ AssignmentPlan mergePreservedAllocations(AssignmentPlan assignmentPlan) { for (Node n : nodes) { int allocations = assignmentsByModelNodeIdPair.getOrDefault(Tuple.tuple(m.id(), n.id()), 0); if (m.currentAllocationsByNodeId().containsKey(n.id())) { - allocations += addPreservedAllocations(n, m); - // As the node has all its available memory we need to manually account memory of models with - // current allocations. - mergedPlanBuilder.accountMemory(m, n); + if (mergedPlanBuilder.getRemainingMemory(n) >= m.memoryBytes()) { + allocations += addPreservedAllocations(n, m); + // As the node has all its available memory we need to manually account memory of models with + // current allocations.
+ mergedPlanBuilder.accountMemory(m, n); + } } if (allocations > 0) { mergedPlanBuilder.assignModelToNode(m, n, allocations); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java index ef8b85ec07f73..72a83d7579463 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java @@ -349,6 +349,9 @@ private boolean isAlreadyAssigned(Deployment deployment, Node node) { public void accountMemory(Deployment m, Node n) { remainingNodeMemory.computeIfPresent(n, (k, v) -> v - m.memoryBytes()); + if (remainingNodeMemory.get(n) < 0) { + throw new IllegalArgumentException("not enough memory on node [" + n.id() + "] to assign model [" + m.id() + "]"); + } } public AssignmentPlan build() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java index 240196e77745d..82a291a8d9fb2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java @@ -361,6 +361,9 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment1, deployment2)) .computePlan(); + assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory("n_2"), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory("n_3"), greaterThanOrEqualTo(0L)); { assertThat(assignmentPlan.assignments(deployment1).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment1).get(); @@ -403,6 +406,8 @@ public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously( assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory("n_2"), greaterThanOrEqualTo(0L)); } public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() { @@ -419,7 +424,7 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); - Deployment deployment2 = new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0); // First only start m_1 @@ -458,13 
+463,21 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { // First, one node goes away. assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); // Then, a node double in memory size is added. assignmentPlan = new AssignmentPlanner(List.of(node1, node3), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory(node3.id()), greaterThanOrEqualTo(0L)); // And another. assignmentPlan = new AssignmentPlanner(List.of(node1, node3, node4), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory(node3.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory(node4.id()), greaterThanOrEqualTo(0L)); // Finally, the remaining smaller node is removed assignmentPlan = new AssignmentPlanner(List.of(node3, node4), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(assignmentPlan.getRemainingNodeMemory(node3.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory(node4.id()), greaterThanOrEqualTo(0L)); indexedBasedPlan = convertToIdIndexed(assignmentPlan); assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); @@ -477,6 +490,33 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { assertThat(assignmentPlan.getRemainingNodeCores("n_2"), equalTo(0)); } + public void testGivenClusterResize_ShouldRemoveAllocatedModels() { + // Ensure that plan is removing previously allocated models if not enough memory is available + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 1, 1, Map.of(), 0); + + // Create a plan where all deployments are assigned at least once + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) + .computePlan(); + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory(node2.id()), greaterThanOrEqualTo(0L)); + + // Now the cluster starts getting resized. Ensure that resources are not over-allocated. 
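+ // With node2 gone, the three deployments (800 MB + 800 MB + 250 MB) can no longer all fit into node1's
+ // 1200 MB, so the planner has to drop some previously allocated models rather than report negative
+ // remaining memory on the node.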
+ assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeCores(node1.id()), greaterThanOrEqualTo(0)); + + } + public static List createModelsFromPlan(AssignmentPlan plan) { List deployments = new ArrayList<>(); for (Deployment m : plan.models()) { From cadcb9b5fded6b836b6b0b5690df5fab91524167 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 9 Oct 2023 14:07:19 +0100 Subject: [PATCH 074/176] Parallelize stale index deletion (#100316) After deleting a snapshot today we clean up all the now-dangling indices sequentially, which can be rather slow. With this commit we parallelize the work across the whole `SNAPSHOT` pool on the master node. Closes #61513 Co-authored-by: Piyush Daftary --- docs/changelog/100316.yaml | 6 + .../snapshots/RepositoriesIT.java | 195 ++++++++++++++++++ .../common/blobstore/DeleteResult.java | 8 + .../AbstractThrottledTaskRunner.java | 51 +++++ .../blobstore/BlobStoreRepository.java | 165 ++++++++------- .../AbstractThrottledTaskRunnerTests.java | 50 +++++ .../snapshots/mockstore/MockRepository.java | 12 ++ 7 files changed, 403 insertions(+), 84 deletions(-) create mode 100644 docs/changelog/100316.yaml diff --git a/docs/changelog/100316.yaml b/docs/changelog/100316.yaml new file mode 100644 index 0000000000000..9efb64a332dc1 --- /dev/null +++ b/docs/changelog/100316.yaml @@ -0,0 +1,6 @@ +pr: 100316 +summary: Parallelize stale index deletion +area: Snapshot/Restore +type: enhancement +issues: + - 61513 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index ffa45e3136b51..5f4c270f69348 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -13,22 +13,38 @@ import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.SimpleBatchedExecutor; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryConflictException; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; import 
org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.nio.file.Path; import java.util.List; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.IntSupplier; +import java.util.function.ToLongFunction; import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -295,4 +311,183 @@ public void testRepositoryConflict() throws Exception { logger.info("--> wait until snapshot deletion is finished"); assertAcked(future.actionGet()); } + + public void testLeakedStaleIndicesAreDeletedBySubsequentDelete() throws Exception { + Client client = client(); + Path repositoryPath = randomRepoPath(); + final String repositoryName = "test-repo"; + final String snapshot1Name = "test-snap-1"; + final String snapshot2Name = "test-snap-2"; + + logger.info("--> creating repository at {}", repositoryPath.toAbsolutePath()); + createRepository(repositoryName, "mock", repositoryPath); + + logger.info("--> creating index-1 and ingest data"); + createIndex("test-idx-1"); + ensureGreen(); + for (int j = 0; j < 10; j++) { + indexDoc("test-idx-1", Integer.toString(10 + j), "foo", "bar" + 10 + j); + } + refresh(); + + logger.info("--> creating first snapshot"); + createFullSnapshot(repositoryName, snapshot1Name); + + logger.info("--> creating index-2 and ingest data"); + createIndex("test-idx-2"); + ensureGreen(); + for (int j = 0; j < 10; j++) { + indexDoc("test-idx-2", Integer.toString(10 + j), "foo", "bar" + 10 + j); + } + refresh(); + + logger.info("--> creating second snapshot"); + createFullSnapshot(repositoryName, snapshot2Name); + + // Make repository throw exceptions when trying to delete stale indices + // This will make sure stale indices stay in repository after snapshot delete + final var repository = (MockRepository) internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class) + .repository(repositoryName); + repository.setFailOnDeleteContainer(true); + + logger.info("--> delete the second snapshot"); + client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot2Name).get(); + + // Make repository work normally + repository.setFailOnDeleteContainer(false); + + // This snapshot should delete last snapshot's residual stale indices as well + logger.info("--> delete snapshot one"); + client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot1Name).get(); + + logger.info("--> check no leftover files"); + assertFileCount(repositoryPath, 2); // just the index-N and index.latest blobs + + logger.info("--> done"); + } + + public void testCleanupStaleBlobsConcurrency() throws Exception { + // This test is verifying the detailed behaviour of cleanup tasks that are enqueued after a snapshot delete is committed to the + // repository, ensuring that we see exactly the right number of tasks enqueued at each stage to demonstrate that we do use all the + // threads available to us, but don't spam the threadpool queue with all the tasks at once, and that we submit one task that drains + // the queue eagerly to provide backpressure. That means this test is sensitive to changes in the breakdown of the cleanup work + // after a snapshot delete. 
+ + final var client = client(); + final var repositoryPath = randomRepoPath(); + final var repositoryName = "test-repo"; + createRepository(repositoryName, "mock", repositoryPath); + + final var threadPool = internalCluster().getCurrentMasterNodeInstance(ThreadPool.class); + final var snapshotPoolSize = threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(); + final var indexCount = snapshotPoolSize * 3; + + for (int i = 0; i < indexCount; i++) { + createIndex("test-idx-" + i); + for (int j = 0; j < 10; j++) { + indexDoc("test-idx-" + i, Integer.toString(10 + j), "foo", "bar" + 10 + j); + } + } + + ensureGreen(); + + final var snapshotName = "test-snap"; + createFullSnapshot(repositoryName, snapshotName); + + final var executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + final var barrier = new CyclicBarrier(snapshotPoolSize + 1); + final var keepBlocking = new AtomicBoolean(true); + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final ToLongFunction repoGenFn = s -> RepositoriesMetadata.get(s).repository(repositoryName).generation(); + final var repositoryGenerationBeforeDelete = repoGenFn.applyAsLong(clusterService.state()); + final ClusterStateListener clusterStateListener = event -> { + if (repoGenFn.applyAsLong(event.previousState()) == repositoryGenerationBeforeDelete + && repoGenFn.applyAsLong(event.state()) > repositoryGenerationBeforeDelete) { + // We are updating the safe repository generation which indicates that the snapshot delete is complete. Once this cluster + // state update completes we will enqueue all the cleanup work on the SNAPSHOT pool. So here we prepare for that by blocking + // all the SNAPSHOT threads: + + // All but one of the threads just repeatedly block on the barrier without picking up any new tasks + for (int i = 0; i < snapshotPoolSize - 1; i++) { + executor.execute(() -> { + while (keepBlocking.get()) { + safeAwait(barrier); + safeAwait(barrier); + } + }); + } + + // The last thread runs a task which blocks on the barrier and then enqueues itself again, at the back of the queue, + // so that this thread will run everything _currently_ in the queue each time the barrier is released, in the order in which + // it was enqueued, and will then block on the barrier again. + new Runnable() { + @Override + public void run() { + executor.execute(() -> { + safeAwait(barrier); + safeAwait(barrier); + if (keepBlocking.get()) { + this.run(); + } + }); + } + }.run(); + } + }; + clusterService.addListener(clusterStateListener); + + final var deleteFuture = new PlainActionFuture(); + client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).execute(deleteFuture); + + safeAwait(barrier); // wait for all the snapshot threads to be blocked + clusterService.removeListener(clusterStateListener); + + // We must wait for all the cleanup work to be enqueued (with the throttled runner at least) so we can be sure of exactly how it + // will execute. 
The cleanup work is enqueued by the master service thread on completion of the cluster state update which increases + // the root blob generation in the repo metadata, so it is sufficient to wait for another no-op task to run on the master service: + PlainActionFuture.get(fut -> clusterService.createTaskQueue("test", Priority.NORMAL, new SimpleBatchedExecutor<>() { + @Override + public Tuple executeTask(ClusterStateTaskListener clusterStateTaskListener, ClusterState clusterState) { + return Tuple.tuple(clusterState, null); + } + + @Override + public void taskSucceeded(ClusterStateTaskListener clusterStateTaskListener, Object ignored) { + fut.onResponse(null); + } + }).submitTask("test", e -> fail(), null), 10, TimeUnit.SECONDS); + + final IntSupplier queueLength = () -> threadPool.stats() + .stats() + .stream() + .filter(s -> s.name().equals(ThreadPool.Names.SNAPSHOT)) + .findFirst() + .orElseThrow() + .queue(); + + // There are indexCount (=3*snapshotPoolSize) index-deletion tasks, plus one for cleaning up the root metadata. However, the + // throttled runner only enqueues one task per SNAPSHOT thread to start with, and then the eager runner adds another one. This shows + // we are not spamming the threadpool with all the tasks at once, which means that other snapshot activities can run alongside this + // cleanup. + assertThat(queueLength.getAsInt(), equalTo(snapshotPoolSize + 1)); + + safeAwait(barrier); // unblock the barrier thread and let it process the queue + safeAwait(barrier); // wait for the queue to be processed + + // We first ran all the one-task actions, each of which completes and puts another one-task action into the queue. Then the eager + // runner runs all the remaining tasks. + assertThat(queueLength.getAsInt(), equalTo(snapshotPoolSize)); + + safeAwait(barrier); // unblock the barrier thread and let it process the queue + safeAwait(barrier); // wait for the queue to be processed + + // Since the eager runner already ran all the remaining tasks, when the enqueued actions run they add no more work to the queue. 
+ assertThat(queueLength.getAsInt(), equalTo(0)); + + assertFileCount(repositoryPath, 2); // just the index-N and index.latest blobs + + keepBlocking.set(false); + safeAwait(barrier); // release the threads so they can exit + assertTrue(deleteFuture.get(10, TimeUnit.SECONDS).isAcknowledged()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java b/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java index c12685db77696..8630ed4cc768f 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java @@ -38,4 +38,12 @@ public DeleteResult add(DeleteResult other) { public DeleteResult add(long blobs, long bytes) { return new DeleteResult(blobsDeleted + blobs, bytesDeleted + bytes); } + + public static DeleteResult of(long blobs, long bytes) { + if (blobs == 0 && bytes == 0) { + return ZERO; + } else { + return new DeleteResult(blobs, bytes); + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunner.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunner.java index ea37dad5ba218..898f73abbd1e9 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunner.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunner.java @@ -17,6 +17,7 @@ import java.util.Queue; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; /** @@ -155,4 +156,54 @@ int runningTasks() { return runningTasks.get(); } + /** + * Run a single task on the given executor which eagerly pulls tasks from the queue and executes them. This must only be used if the + * tasks in the queue are all synchronous, i.e. they release their ref before returning from {@code onResponse()}. 
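+ * For example, the stale-blob cleanup below calls
+ * {@code staleBlobDeleteRunner.runSyncTasksEagerly(threadPool.executor(ThreadPool.Names.SNAPSHOT))} once all
+ * deletion tasks have been enqueued.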
+ */ + public void runSyncTasksEagerly(Executor executor) { + executor.execute(new AbstractRunnable() { + @Override + protected void doRun() { + final AtomicBoolean isDone = new AtomicBoolean(true); + final Releasable ref = () -> isDone.set(true); + ActionListener task; + while ((task = tasks.poll()) != null) { + isDone.set(false); + try { + logger.trace("[{}] eagerly running task {}", taskRunnerName, task); + task.onResponse(ref); + } catch (Exception e) { + logger.error(Strings.format("[%s] task %s failed", taskRunnerName, task), e); + assert false : e; + task.onFailure(e); + return; + } + if (isDone.get() == false) { + logger.error( + "runSyncTasksEagerly() was called on a queue [{}] containing an async task: [{}]", + taskRunnerName, + task + ); + assert false; + return; + } + } + } + + @Override + public void onFailure(Exception e) { + logger.error("unexpected failure in runSyncTasksEagerly", e); + assert false : e; + } + + @Override + public void onRejection(Exception e) { + if (e instanceof EsRejectedExecutionException) { + logger.debug("runSyncTasksEagerly was rejected", e); + } else { + onFailure(e); + } + } + }); + } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 0825337143f8e..82a38d74d25e1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -72,6 +72,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.common.util.concurrent.ThrottledTaskRunner; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.CheckedConsumer; @@ -390,6 +391,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final ShardSnapshotTaskRunner shardSnapshotTaskRunner; + private final ThrottledTaskRunner staleBlobDeleteRunner; + /** * Constructs new BlobStoreRepository * @param metadata The metadata for this repository including name and settings @@ -430,6 +433,11 @@ protected BlobStoreRepository( this::doSnapshotShard, this::snapshotFile ); + staleBlobDeleteRunner = new ThrottledTaskRunner( + "cleanupStaleBlobs", + threadPool.info(ThreadPool.Names.SNAPSHOT).getMax(), + threadPool.executor(ThreadPool.Names.SNAPSHOT) + ); } @Override @@ -1150,31 +1158,65 @@ private void cleanupStaleBlobs( RepositoryData newRepoData, ActionListener listener ) { - final GroupedActionListener groupedListener = new GroupedActionListener<>(2, ActionListener.wrap(deleteResults -> { - DeleteResult deleteResult = DeleteResult.ZERO; - for (DeleteResult result : deleteResults) { - deleteResult = deleteResult.add(result); + final var blobsDeleted = new AtomicLong(); + final var bytesDeleted = new AtomicLong(); + try (var listeners = new RefCountingListener(listener.map(ignored -> DeleteResult.of(blobsDeleted.get(), bytesDeleted.get())))) { + + final List staleRootBlobs = staleRootBlobs(newRepoData, rootBlobs.keySet()); + if (staleRootBlobs.isEmpty() == false) { + staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { + try (ref) { + logStaleRootLevelBlobs(newRepoData.getGenId() - 1, deletedSnapshots, staleRootBlobs); + 
deleteFromContainer(blobContainer(), staleRootBlobs.iterator()); + for (final var staleRootBlob : staleRootBlobs) { + bytesDeleted.addAndGet(rootBlobs.get(staleRootBlob).length()); + } + blobsDeleted.addAndGet(staleRootBlobs.size()); + } catch (Exception e) { + logger.warn( + () -> format( + "[%s] The following blobs are no longer part of any snapshot [%s] but failed to remove them", + metadata.name(), + staleRootBlobs + ), + e + ); + } + })); } - listener.onResponse(deleteResult); - }, listener::onFailure)); - final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - final List staleRootBlobs = staleRootBlobs(newRepoData, rootBlobs.keySet()); - if (staleRootBlobs.isEmpty()) { - groupedListener.onResponse(DeleteResult.ZERO); - } else { - executor.execute(ActionRunnable.supply(groupedListener, () -> { - List deletedBlobs = cleanupStaleRootFiles(newRepoData.getGenId() - 1, deletedSnapshots, staleRootBlobs); - return new DeleteResult(deletedBlobs.size(), deletedBlobs.stream().mapToLong(name -> rootBlobs.get(name).length()).sum()); - })); + final var survivingIndexIds = newRepoData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); + for (final var indexEntry : foundIndices.entrySet()) { + final var indexSnId = indexEntry.getKey(); + if (survivingIndexIds.contains(indexSnId)) { + continue; + } + staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { + try (ref) { + logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); + final var deleteResult = indexEntry.getValue().delete(OperationPurpose.SNAPSHOT); + blobsDeleted.addAndGet(deleteResult.blobsDeleted()); + bytesDeleted.addAndGet(deleteResult.bytesDeleted()); + logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); + } catch (IOException e) { + logger.warn(() -> format(""" + [%s] index %s is no longer part of any snapshot in the repository, \ + but failed to clean up its index folder""", metadata.name(), indexSnId), e); + } + })); + } } - final Set survivingIndexIds = newRepoData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); - if (foundIndices.keySet().equals(survivingIndexIds)) { - groupedListener.onResponse(DeleteResult.ZERO); - } else { - executor.execute(ActionRunnable.supply(groupedListener, () -> cleanupStaleIndices(foundIndices, survivingIndexIds))); - } + // If we did the cleanup of stale indices purely using a throttled executor then there would be no backpressure to prevent us from + // falling arbitrarily far behind. But nor do we want to dedicate all the SNAPSHOT threads to stale index cleanups because that + // would slow down other snapshot operations in situations that do not need backpressure. + // + // The solution is to dedicate one SNAPSHOT thread to doing the cleanups eagerly, alongside the throttled executor which spreads + // the rest of the work across the other threads if they are free. If the eager cleanup loop doesn't finish before the next one + // starts then we dedicate another SNAPSHOT thread to the deletions, and so on, until eventually either we catch up or the SNAPSHOT + // pool is fully occupied with blob deletions, which pushes back on other snapshot operations. + + staleBlobDeleteRunner.runSyncTasksEagerly(threadPool.executor(ThreadPool.Names.SNAPSHOT)); } /** @@ -1183,8 +1225,8 @@ private void cleanupStaleBlobs( * TODO: Add shard level cleanups * TODO: Add unreferenced index metadata cleanup *
<ul>
-    *     <li>Deleting stale indices {@link #cleanupStaleIndices}</li>
-    *     <li>Deleting unreferenced root level blobs {@link #cleanupStaleRootFiles}</li>
+    *     <li>Deleting stale indices</li>
+    *     <li>Deleting unreferenced root level blobs</li>
     * </ul>
* @param repositoryStateId Current repository state id * @param repositoryMetaVersion version of the updated repository metadata to write @@ -1262,70 +1304,25 @@ private static List staleRootBlobs(RepositoryData repositoryData, Set cleanupStaleRootFiles( - long previousGeneration, - Collection deletedSnapshots, - List blobsToDelete - ) { - if (blobsToDelete.isEmpty()) { - return blobsToDelete; - } - try { - if (logger.isInfoEnabled()) { - // If we're running root level cleanup as part of a snapshot delete we should not log the snapshot- and global metadata - // blobs associated with the just deleted snapshots as they are expected to exist and not stale. Otherwise every snapshot - // delete would also log a confusing INFO message about "stale blobs". - final Set blobNamesToIgnore = deletedSnapshots.stream() - .flatMap( - snapshotId -> Stream.of( - GLOBAL_METADATA_FORMAT.blobName(snapshotId.getUUID()), - SNAPSHOT_FORMAT.blobName(snapshotId.getUUID()), - INDEX_FILE_PREFIX + previousGeneration - ) + private void logStaleRootLevelBlobs(long previousGeneration, Collection deletedSnapshots, List blobsToDelete) { + if (logger.isInfoEnabled()) { + // If we're running root level cleanup as part of a snapshot delete we should not log the snapshot- and global metadata + // blobs associated with the just deleted snapshots as they are expected to exist and not stale. Otherwise every snapshot + // delete would also log a confusing INFO message about "stale blobs". + final Set blobNamesToIgnore = deletedSnapshots.stream() + .flatMap( + snapshotId -> Stream.of( + GLOBAL_METADATA_FORMAT.blobName(snapshotId.getUUID()), + SNAPSHOT_FORMAT.blobName(snapshotId.getUUID()), + INDEX_FILE_PREFIX + previousGeneration ) - .collect(Collectors.toSet()); - final List blobsToLog = blobsToDelete.stream().filter(b -> blobNamesToIgnore.contains(b) == false).toList(); - if (blobsToLog.isEmpty() == false) { - logger.info("[{}] Found stale root level blobs {}. Cleaning them up", metadata.name(), blobsToLog); - } - } - deleteFromContainer(blobContainer(), blobsToDelete.iterator()); - return blobsToDelete; - } catch (Exception e) { - logger.warn( - () -> format( - "[%s] The following blobs are no longer part of any snapshot [%s] but failed to remove them", - metadata.name(), - blobsToDelete - ), - e - ); - } - return Collections.emptyList(); - } - - private DeleteResult cleanupStaleIndices(Map foundIndices, Set survivingIndexIds) { - DeleteResult deleteResult = DeleteResult.ZERO; - for (Map.Entry indexEntry : foundIndices.entrySet()) { - final String indexSnId = indexEntry.getKey(); - try { - if (survivingIndexIds.contains(indexSnId) == false) { - logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); - deleteResult = deleteResult.add(indexEntry.getValue().delete(OperationPurpose.SNAPSHOT)); - logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); - } - } catch (Exception e) { - logger.warn( - () -> format( - "[%s] index %s is no longer part of any snapshot in the repository, " + "but failed to clean up its index folder", - metadata.name(), - indexSnId - ), - e - ); + ) + .collect(Collectors.toSet()); + final List blobsToLog = blobsToDelete.stream().filter(b -> blobNamesToIgnore.contains(b) == false).toList(); + if (blobsToLog.isEmpty() == false) { + logger.info("[{}] Found stale root level blobs {}. 
Cleaning them up", metadata.name(), blobsToLog); } } - return deleteResult; } @Override diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunnerTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunnerTests.java index 7298512603b7a..b7524b0ad215e 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunnerTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunnerTests.java @@ -21,6 +21,7 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -141,6 +142,55 @@ public void onResponse(Releasable releasable) { assertNoRunningTasks(taskRunner); } + public void testRunSyncTasksEagerly() { + final int maxTasks = randomIntBetween(1, maxThreads); + final int taskCount = between(maxTasks, maxTasks * 2); + final var barrier = new CyclicBarrier(maxTasks + 1); + final var executedCountDown = new CountDownLatch(taskCount); + final var testThread = Thread.currentThread(); + + class TestTask implements ActionListener { + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + + @Override + public void onResponse(Releasable releasable) { + try (releasable) { + if (Thread.currentThread() != testThread) { + safeAwait(barrier); + safeAwait(barrier); + } + } finally { + executedCountDown.countDown(); + } + } + } + + final BlockingQueue queue = ConcurrentCollections.newBlockingQueue(); + final AbstractThrottledTaskRunner taskRunner = new AbstractThrottledTaskRunner<>("test", maxTasks, executor, queue); + for (int i = 0; i < taskCount; i++) { + taskRunner.enqueueTask(new TestTask()); + } + + safeAwait(barrier); + assertThat(taskRunner.runningTasks(), equalTo(maxTasks)); // maxTasks tasks are running now + assertEquals(taskCount - maxTasks, queue.size()); // the remainder are enqueued + + final var capturedTask = new AtomicReference(); + taskRunner.runSyncTasksEagerly(t -> assertTrue(capturedTask.compareAndSet(null, t))); + assertEquals(taskCount - maxTasks, queue.size()); // hasn't run any tasks yet + capturedTask.get().run(); + assertTrue(queue.isEmpty()); + + safeAwait(barrier); + safeAwait(executedCountDown); + assertTrue(queue.isEmpty()); + assertNoRunningTasks(taskRunner); + } + public void testFailsTasksOnRejectionOrShutdown() throws Exception { final var executor = randomBoolean() ? 
EsExecutors.newScaling("test", maxThreads, maxThreads, 0, TimeUnit.MILLISECONDS, true, threadFactory, threadContext) diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index d5b59ef3274ea..1e4c328e9b1ac 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -170,6 +170,8 @@ public long getFailureCount() { private volatile boolean blocked = false; + private volatile boolean failOnDeleteContainer = false; + public MockRepository( RepositoryMetadata metadata, Environment environment, @@ -352,6 +354,13 @@ public void setBlockOnceOnReadSnapshotInfoIfAlreadyBlocked() { blockOnceOnReadSnapshotInfo.set(true); } + /** + * Sets the fail-on-delete-container flag, which if {@code true} throws an exception when deleting a {@link BlobContainer}. + */ + public void setFailOnDeleteContainer(boolean failOnDeleteContainer) { + this.failOnDeleteContainer = failOnDeleteContainer; + } + public boolean blocked() { return blocked; } @@ -550,6 +559,9 @@ public InputStream readBlob(OperationPurpose purpose, String name, long position @Override public DeleteResult delete(OperationPurpose purpose) throws IOException { + if (failOnDeleteContainer) { + throw new IOException("simulated delete-container failure"); + } DeleteResult deleteResult = DeleteResult.ZERO; for (BlobContainer child : children(purpose).values()) { deleteResult = deleteResult.add(child.delete(purpose)); From 3919a136a0fcd175c89dbdc85673b3502c401417 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Mon, 9 Oct 2023 15:18:43 +0200 Subject: [PATCH 075/176] Reenable HeapAttackIT testHugeManyConcat() (#100511) Reenable testHugeManyConcat(). --- .../elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java index 55b4454ce2105..03c207c2c211e 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java @@ -192,7 +192,6 @@ public void testManyConcat() throws IOException { /** * Hits a circuit breaker by building many moderately long strings. 
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") public void testHugeManyConcat() throws IOException { initManyLongs(); assertCircuitBreaks(() -> manyConcat(2000)); From 606fc13af59690c28a90577b9605ca842e7571c9 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 9 Oct 2023 08:44:41 -0500 Subject: [PATCH 076/176] Update docs for v8.10.3 release (#100432) (#100512) --- docs/changelog/99604.yaml | 2 +- docs/reference/release-notes.asciidoc | 2 + docs/reference/release-notes/8.10.3.asciidoc | 70 ++++++++++++++++++++ 3 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 docs/reference/release-notes/8.10.3.asciidoc diff --git a/docs/changelog/99604.yaml b/docs/changelog/99604.yaml index 7b473a056d608..0bace7aef1b26 100644 --- a/docs/changelog/99604.yaml +++ b/docs/changelog/99604.yaml @@ -1,5 +1,5 @@ pr: 99604 -summary: Show concrete error when enrich index not exist rather than NPE +summary: Show a concrete error when the enrich index does not exist rather than a NullPointerException area: Ingest Node type: enhancement issues: [] diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 78e15ff55f7e9..6c4e5cd9a5250 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. * <<release-notes-8.12.0>> * <<release-notes-8.11.0>> +* <<release-notes-8.10.3>> * <<release-notes-8.10.2>> * <<release-notes-8.10.1>> * <<release-notes-8.10.0>> @@ -54,6 +55,7 @@ This section summarizes the changes in each release. include::release-notes/8.12.0.asciidoc[] include::release-notes/8.11.0.asciidoc[] +include::release-notes/8.10.3.asciidoc[] include::release-notes/8.10.2.asciidoc[] include::release-notes/8.10.1.asciidoc[] include::release-notes/8.10.0.asciidoc[] diff --git a/docs/reference/release-notes/8.10.3.asciidoc b/docs/reference/release-notes/8.10.3.asciidoc new file mode 100644 index 0000000000000..a09beb26b4d27 --- /dev/null +++ b/docs/reference/release-notes/8.10.3.asciidoc @@ -0,0 +1,70 @@ +[[release-notes-8.10.3]] +== {es} version 8.10.3 + +coming[8.10.3] + +Also see <<breaking-changes-8.10,Breaking changes in 8.10>>.
+ +[[bug-8.10.3]] +[float] +=== Bug fixes + +Aggregations:: +* Fix cardinality agg for `const_keyword` {es-pull}99814[#99814] (issue: {es-issue}99776[#99776]) + +Distributed:: +* Skip settings validation during desired nodes updates {es-pull}99946[#99946] + +Highlighting:: +* Implement matches() on `SourceConfirmedTextQuery` {es-pull}100252[#100252] + +ILM+SLM:: +* ILM introduce the `check-ts-end-time-passed` step {es-pull}100179[#100179] (issue: {es-issue}99696[#99696]) +* ILM the delete action waits for a TSDS index time/bounds to lapse {es-pull}100207[#100207] + +Ingest Node:: +* Validate enrich index before completing policy execution {es-pull}100106[#100106] + +Machine Learning:: +* Adding retry logic for start model deployment API {es-pull}99673[#99673] +* Using 1 MB chunks for elser model storage {es-pull}99677[#99677] + +Search:: +* Close expired search contexts on SEARCH thread {es-pull}99660[#99660] +* Fix fields API for `geo_point` fields inside other arrays {es-pull}99868[#99868] (issue: {es-issue}99781[#99781]) + +Snapshot/Restore:: +* Support $ and / in restore rename replacements {es-pull}99892[#99892] (issue: {es-issue}99078[#99078]) + +Transform:: +* Do not use PIT in the presence of remote indices in source {es-pull}99803[#99803] +* Ignore "index not found" error when `delete_dest_index` flag is set but the dest index doesn't exist {es-pull}99738[#99738] +* Let `_stats` internally timeout if checkpoint information can not be retrieved {es-pull}99914[#99914] + +Vector Search:: +* Update version range in `jvm.options` for the Panama Vector API {es-pull}99846[#99846] + +[[enhancement-8.10.3]] +[float] +=== Enhancements + +Authorization:: +* Add manage permission for fleet managed threat intel indices {es-pull}99231[#99231] + +Highlighting:: +* Implement matches() on `SourceConfirmedTextQuery` {es-pull}100134[#100134] + +Ingest Node:: +* Show a concrete error when the enrich index does not exist rather than a NullPointerException {es-pull}99604[#99604] + +Search:: +* Add checks in term and terms queries that input terms are not too long {es-pull}99818[#99818] (issue: {es-issue}99802[#99802]) + +[[upgrade-8.10.3]] +[float] +=== Upgrades + +Packaging:: +* Upgrade bundled JDK to Java 21 {es-pull}99724[#99724] + + From bd8a2f88f3909a7df1187e7191bf130d1ebc25b9 Mon Sep 17 00:00:00 2001 From: Jonathan Buttner <56361221+jonathan-buttner@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:03:55 -0400 Subject: [PATCH 077/176] [ML] Add http client for inference API services (#100359) * Adding tests for http client * Finishing http client tests * Adding remaining tests * Addressing style issues * Refactoring logging * Refactoring evictor * Addressing feedback --------- Co-authored-by: Elastic Machine --- .../inference/src/main/java/module-info.java | 4 + .../xpack/inference/InferencePlugin.java | 44 ++++ .../common/SizeLimitInputStream.java | 81 ++++++ .../inference/external/http/HttpClient.java | 149 +++++++++++ .../inference/external/http/HttpResult.java | 46 ++++ .../inference/external/http/HttpSettings.java | 114 ++++++++ .../external/http/IdleConnectionEvictor.java | 79 ++++++ .../common/SizeLimitInputStreamTests.java | 92 +++++++ .../external/http/HttpClientTests.java | 247 ++++++++++++++++++ .../http/IdleConnectionEvictorTests.java | 171 ++++++++++++ 10 files changed, 1027 insertions(+) create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java create mode 100644 
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java diff --git a/x-pack/plugin/inference/src/main/java/module-info.java b/x-pack/plugin/inference/src/main/java/module-info.java index b21f919bbdc8a..2aec99a27c849 100644 --- a/x-pack/plugin/inference/src/main/java/module-info.java +++ b/x-pack/plugin/inference/src/main/java/module-info.java @@ -13,6 +13,10 @@ requires org.apache.httpcomponents.httpclient; requires org.apache.logging.log4j; + requires org.apache.httpcomponents.httpcore; + requires org.apache.httpcomponents.httpasyncclient; + requires org.apache.httpcomponents.httpcore.nio; + requires org.apache.lucene.core; exports org.elasticsearch.xpack.inference.rest; exports org.elasticsearch.xpack.inference.action; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index c2d69a64df75c..25439d0bfc930 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.internal.Client; @@ -17,8 +18,11 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.IndicesService; @@ -32,6 +36,8 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -44,6 +50,8 @@ import org.elasticsearch.xpack.inference.action.TransportGetInferenceModelAction; import org.elasticsearch.xpack.inference.action.TransportInferenceAction; import org.elasticsearch.xpack.inference.action.TransportPutInferenceModelAction; +import 
org.elasticsearch.xpack.inference.external.http.HttpClient; +import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.rest.RestDeleteInferenceModelAction; import org.elasticsearch.xpack.inference.rest.RestGetInferenceModelAction; @@ -58,6 +66,13 @@ public class InferencePlugin extends Plugin implements ActionPlugin, InferenceServicePlugin, SystemIndexPlugin { public static final String NAME = "inference"; + public static final String UTILITY_THREAD_POOL_NAME = "inference_utility"; + private final Settings settings; + private final SetOnce<HttpClient> httpClient = new SetOnce<>(); + + public InferencePlugin(Settings settings) { + this.settings = settings; + } @Override public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { @@ -104,6 +119,9 @@ public Collection<Object> createComponents( AllocationService allocationService, IndicesService indicesService ) { + var httpSettings = new HttpSettings(settings, clusterService); + httpClient.set(HttpClient.create(httpSettings, threadPool)); + ModelRegistry modelRegistry = new ModelRegistry(client); return List.of(modelRegistry); } @@ -135,6 +153,25 @@ public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings sett ); } + @Override + public List<ExecutorBuilder<?>> getExecutorBuilders(Settings unused) { + ScalingExecutorBuilder utility = new ScalingExecutorBuilder( + UTILITY_THREAD_POOL_NAME, + 0, + 1, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.utility_thread_pool" + ); + + return List.of(utility); + } + + @Override + public List<Setting<?>> getSettings() { + return HttpSettings.getSettings(); + } + @Override public String getFeatureName() { return "inference_plugin"; @@ -154,4 +191,11 @@ public List<Factory> getInferenceServiceFactories() { public List<NamedWriteableRegistry.Entry> getInferenceServiceNamedWriteables() { return InferenceNamedWriteablesProvider.getNamedWriteables(); } + + @Override + public void close() { + if (httpClient.get() != null) { + IOUtils.closeWhileHandlingException(httpClient.get()); + } + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java new file mode 100644 index 0000000000000..78e7b5cbbd95e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStream.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ +package org.elasticsearch.xpack.inference.common; + +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.concurrent.atomic.AtomicLong; + +/** + * An input stream throwing an exception when a preconfigured number of bytes is reached + * This input stream exists to prevent reading streaming or very big requests + * + * This implementation does not support mark/reset to prevent complex byte counting recalculations + */ +public final class SizeLimitInputStream extends FilterInputStream { + + private final long maxByteSize; + private final AtomicLong byteCounter = new AtomicLong(0); + + /** + * Creates a new input stream, that throws an exception after a certain number of bytes is read + * @param maxByteSize The maximum data to read, before throwing an exception + * @param in The underlying input stream containing the data + */ + public SizeLimitInputStream(ByteSizeValue maxByteSize, InputStream in) { + super(in); + this.maxByteSize = maxByteSize.getBytes(); + } + + @Override + public int read() throws IOException { + int bytesRead = super.read(); + + if (bytesRead != -1) { + byteCounter.incrementAndGet(); + checkMaximumLengthReached(); + } + + return bytesRead; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int bytesRead = super.read(b, off, len); + + if (bytesRead != -1) { + // count only the bytes actually read; adding len would overcount on a short read + byteCounter.addAndGet(bytesRead); + checkMaximumLengthReached(); + } + + return bytesRead; + } + + @Override + public synchronized void mark(int readlimit) { + throw new UnsupportedOperationException("mark not supported"); + } + + @Override + public synchronized void reset() throws IOException { + throw new IOException("reset not supported"); + } + + @Override + public boolean markSupported() { + return false; + } + + private void checkMaximumLengthReached() throws IOException { + if (byteCounter.get() > maxByteSize) { + throw new IOException("Maximum limit of [" + maxByteSize + "] bytes reached"); + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java new file mode 100644 index 0000000000000..5e3ceac875921 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.HttpResponse; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.concurrent.FutureCallback; +import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; +import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; +import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; +import org.apache.http.nio.reactor.ConnectingIOReactor; +import org.apache.http.nio.reactor.IOReactorException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.common.socket.SocketAccess; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.CancellationException; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +public class HttpClient implements Closeable { + private static final Logger logger = LogManager.getLogger(HttpClient.class); + + enum Status { + CREATED, + STARTED, + STOPPED + } + + private final CloseableHttpAsyncClient client; + private final IdleConnectionEvictor connectionEvictor; + private final AtomicReference status = new AtomicReference<>(Status.CREATED); + private final ThreadPool threadPool; + private final HttpSettings settings; + + public static HttpClient create(HttpSettings settings, ThreadPool threadPool) { + PoolingNHttpClientConnectionManager connectionManager = createConnectionManager(); + IdleConnectionEvictor connectionEvictor = new IdleConnectionEvictor( + threadPool, + connectionManager, + settings.getEvictionInterval(), + settings.getEvictionMaxIdle() + ); + + int maxConnections = settings.getMaxConnections(); + CloseableHttpAsyncClient client = createAsyncClient(connectionManager, maxConnections); + + return new HttpClient(settings, client, connectionEvictor, threadPool); + } + + private static PoolingNHttpClientConnectionManager createConnectionManager() { + ConnectingIOReactor ioReactor; + try { + ioReactor = new DefaultConnectingIOReactor(); + } catch (IOReactorException e) { + var message = "Failed to initialize the inference http client"; + logger.error(message, e); + throw new ElasticsearchException(message, e); + } + + return new PoolingNHttpClientConnectionManager(ioReactor); + } + + private static CloseableHttpAsyncClient createAsyncClient(PoolingNHttpClientConnectionManager connectionManager, int maxConnections) { + HttpAsyncClientBuilder clientBuilder = HttpAsyncClientBuilder.create(); + + clientBuilder.setConnectionManager(connectionManager); + clientBuilder.setMaxConnPerRoute(maxConnections); + clientBuilder.setMaxConnTotal(maxConnections); + // The apache client will be shared across all connections because it can be expensive to create it + // so we don't want to support cookies to avoid accidental authentication for unauthorized users + clientBuilder.disableCookieManagement(); + + return clientBuilder.build(); + } + + // Default for testing + HttpClient(HttpSettings settings, CloseableHttpAsyncClient asyncClient, IdleConnectionEvictor evictor, ThreadPool threadPool) { + this.settings = settings; + this.threadPool = threadPool; + this.client = asyncClient; + 
this.connectionEvictor = evictor; + } + + public void start() { + if (status.compareAndSet(Status.CREATED, Status.STARTED)) { + client.start(); + connectionEvictor.start(); + } + } + + public void send(HttpUriRequest request, ActionListener listener) throws IOException { + // The caller must call start() first before attempting to send a request + assert status.get() == Status.STARTED; + + SocketAccess.doPrivileged(() -> client.execute(request, new FutureCallback<>() { + @Override + public void completed(HttpResponse response) { + respondUsingUtilityThread(response, request, listener); + } + + @Override + public void failed(Exception ex) { + logger.error(format("Request [%s] failed", request.getRequestLine()), ex); + failUsingUtilityThread(ex, listener); + } + + @Override + public void cancelled() { + failUsingUtilityThread(new CancellationException(format("Request [%s] was cancelled", request.getRequestLine())), listener); + } + })); + } + + private void respondUsingUtilityThread(HttpResponse response, HttpUriRequest request, ActionListener listener) { + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(() -> { + try { + listener.onResponse(HttpResult.create(settings.getMaxResponseSize(), response)); + } catch (Exception e) { + logger.error(format("Failed to create http result for [%s]", request.getRequestLine()), e); + listener.onFailure(e); + } + }); + } + + private void failUsingUtilityThread(Exception exception, ActionListener listener) { + threadPool.executor(UTILITY_THREAD_POOL_NAME).execute(() -> listener.onFailure(exception)); + } + + @Override + public void close() throws IOException { + status.set(Status.STOPPED); + client.close(); + connectionEvictor.stop(); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java new file mode 100644 index 0000000000000..82256b51cf83e --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpResult.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.HttpResponse; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Streams; +import org.elasticsearch.xpack.inference.common.SizeLimitInputStream; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Objects; + +public record HttpResult(HttpResponse response, byte[] body) { + + public static HttpResult create(ByteSizeValue maxResponseSize, HttpResponse response) throws IOException { + return new HttpResult(response, limitBody(maxResponseSize, response)); + } + + private static byte[] limitBody(ByteSizeValue maxResponseSize, HttpResponse response) throws IOException { + if (response.getEntity() == null) { + return new byte[0]; + } + + final byte[] body; + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { + try (InputStream is = new SizeLimitInputStream(maxResponseSize, response.getEntity().getContent())) { + Streams.copy(is, outputStream); + } + body = outputStream.toByteArray(); + } + + return body; + } + + public HttpResult { + Objects.requireNonNull(response); + Objects.requireNonNull(body); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java new file mode 100644 index 0000000000000..420f7822df06c --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; + +import java.util.List; + +public class HttpSettings { + // These settings are default scope for testing + static final Setting<ByteSizeValue> MAX_HTTP_RESPONSE_SIZE = Setting.byteSizeSetting( + "xpack.inference.http.max_response_size", + new ByteSizeValue(10, ByteSizeUnit.MB), // default + ByteSizeValue.ONE, // min + new ByteSizeValue(50, ByteSizeUnit.MB), // max + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + static final Setting<Integer> MAX_CONNECTIONS = Setting.intSetting( + "xpack.inference.http.max_connections", + 500, + 1, + // TODO pick a reasonable value here + 1000, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private static final TimeValue DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME = TimeValue.timeValueSeconds(10); + + static final Setting<TimeValue> CONNECTION_EVICTION_THREAD_INTERVAL_SETTING = Setting.timeSetting( + "xpack.inference.http.connection_eviction_interval", + DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private static final TimeValue DEFAULT_CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING = DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME; + static final Setting<TimeValue> CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING = Setting.timeSetting( + "xpack.inference.http.connection_eviction_max_idle_time", + DEFAULT_CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private volatile ByteSizeValue maxResponseSize; + private volatile int maxConnections; + private volatile TimeValue evictionInterval; + private volatile TimeValue evictionMaxIdle; + + public HttpSettings(Settings settings, ClusterService clusterService) { + this.maxResponseSize = MAX_HTTP_RESPONSE_SIZE.get(settings); + this.maxConnections = MAX_CONNECTIONS.get(settings); + this.evictionInterval = CONNECTION_EVICTION_THREAD_INTERVAL_SETTING.get(settings); + this.evictionMaxIdle = CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING.get(settings); + + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_HTTP_RESPONSE_SIZE, this::setMaxResponseSize); + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_CONNECTIONS, this::setMaxConnections); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CONNECTION_EVICTION_THREAD_INTERVAL_SETTING, this::setEvictionInterval); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING, this::setEvictionMaxIdle); + } + + public ByteSizeValue getMaxResponseSize() { + return maxResponseSize; + } + + public int getMaxConnections() { + return maxConnections; + } + + public TimeValue getEvictionInterval() { + return evictionInterval; + } + + public TimeValue getEvictionMaxIdle() { + return evictionMaxIdle; + } + + private void setMaxResponseSize(ByteSizeValue maxResponseSize) { + this.maxResponseSize = maxResponseSize; + } + + private void setMaxConnections(int maxConnections) { + this.maxConnections = maxConnections; + } + + private void setEvictionInterval(TimeValue evictionInterval) { + this.evictionInterval = evictionInterval; + } + + private void setEvictionMaxIdle(TimeValue evictionMaxIdle) { + this.evictionMaxIdle
= evictionMaxIdle; + } + + public static List<Setting<?>> getSettings() { + return List.of( + MAX_HTTP_RESPONSE_SIZE, + MAX_CONNECTIONS, + CONNECTION_EVICTION_THREAD_INTERVAL_SETTING, + CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING + ); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java new file mode 100644 index 0000000000000..3ea0bc04848e0 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.nio.conn.NHttpClientConnectionManager; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; + +/** + * Starts a monitoring task to remove expired and idle connections from the HTTP connection pool. + * This is modeled off of https://github.com/apache/httpcomponents-client/blob/master/httpclient5/ + * src/main/java/org/apache/hc/client5/http/impl/IdleConnectionEvictor.java + * + * NOTE: This class should be removed once the apache async client is upgraded to 5.x because that version of the library + * includes this already. + * + * See here for more info.
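+ *
+ * A minimal usage sketch (names as declared in this class; nothing here is assumed beyond the members below):
+ *   IdleConnectionEvictor evictor = new IdleConnectionEvictor(threadPool, connectionManager, sleepTime, maxIdleTime);
+ *   evictor.start(); // schedules closeExpiredConnections()/closeIdleConnections(maxIdleTime) at a fixed sleepTime delay
+ *   evictor.stop();  // cancels the scheduled task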
+ */ +public class IdleConnectionEvictor { + private static final Logger logger = LogManager.getLogger(IdleConnectionEvictor.class); + + private final ThreadPool threadPool; + private final NHttpClientConnectionManager connectionManager; + private final TimeValue sleepTime; + private final TimeValue maxIdleTime; + private Scheduler.Cancellable cancellableTask; + + public IdleConnectionEvictor( + ThreadPool threadPool, + NHttpClientConnectionManager connectionManager, + TimeValue sleepTime, + TimeValue maxIdleTime + ) { + this.threadPool = threadPool; + this.connectionManager = Objects.requireNonNull(connectionManager); + this.sleepTime = sleepTime; + this.maxIdleTime = maxIdleTime; + } + + public synchronized void start() { + if (cancellableTask == null) { + startInternal(); + } + } + + private void startInternal() { + cancellableTask = threadPool.scheduleWithFixedDelay(() -> { + try { + connectionManager.closeExpiredConnections(); + if (maxIdleTime != null) { + connectionManager.closeIdleConnections(maxIdleTime.millis(), TimeUnit.MILLISECONDS); + } + } catch (Exception e) { + logger.warn("HTTP connection eviction failed", e); + } + }, sleepTime, threadPool.executor(UTILITY_THREAD_POOL_NAME)); + } + + public void stop() { + cancellableTask.cancel(); + } + + public boolean isRunning() { + return cancellableTask != null && cancellableTask.isCancelled() == false; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java new file mode 100644 index 0000000000000..638c1858df45c --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/common/SizeLimitInputStreamTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.common; + +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayInputStream; +import java.io.IOException; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.elasticsearch.core.Strings.format; +import static org.hamcrest.Matchers.is; + +public class SizeLimitInputStreamTests extends ESTestCase { + public void testRead_WithoutThrowingException() throws IOException { + int size = randomIntBetween(1, 100); + + try (var stream = createRandomLimitedStream(size, size)) { + for (int i = 0; i < size; i++) { + stream.read(new byte[size]); + } + } + } + + public void testRead_OneByteAtATime_WithoutThrowingException() throws IOException { + int size = randomIntBetween(1, 100); + + try (var stream = createRandomLimitedStream(size, size)) { + for (int i = 0; i < size; i++) { + stream.read(); + } + } + } + + public void testRead_ThrowsException_WhenLimitReached() { + int maxAllowed = randomIntBetween(1, 100); + int dataSize = maxAllowed + 1; + + IOException e = expectThrows(IOException.class, () -> { + try (var stream = createRandomLimitedStream(maxAllowed + 1, maxAllowed)) { + stream.read(new byte[dataSize]); + } + }); + + assertThat(e.getMessage(), is(format("Maximum limit of [%s] bytes reached", maxAllowed))); + } + + public void testRead_OneByteAtATime_ThrowsException_WhenLimitReached() { + int maxAllowed = randomIntBetween(1, 100); + int dataSize = maxAllowed + 1; + + IOException e = expectThrows(IOException.class, () -> { + try (var stream = createRandomLimitedStream(maxAllowed + 1, maxAllowed)) { + for (int i = 0; i < dataSize; i++) { + stream.read(); + } + } + }); + + assertThat(e.getMessage(), is(format("Maximum limit of [%s] bytes reached", maxAllowed))); + } + + public void testMarkAndReset_ThrowsUnsupportedException() throws IOException { + int size = randomIntBetween(1, 100); + + try (var stream = createRandomLimitedStream(size, size)) { + assertThat(stream.markSupported(), is(false)); + + UnsupportedOperationException unsupportedOperationException = expectThrows( + UnsupportedOperationException.class, + () -> stream.mark(10) + ); + assertThat(unsupportedOperationException.getMessage(), is("mark not supported")); + + IOException e = expectThrows(IOException.class, stream::reset); + assertThat(e.getMessage(), is("reset not supported")); + } + } + + private static SizeLimitInputStream createRandomLimitedStream(int dataSize, int maxAllowedSize) { + String data = randomAlphaOfLength(dataSize); + ByteSizeValue byteSizeValue = new ByteSizeValue(maxAllowedSize, ByteSizeUnit.BYTES); + return new SizeLimitInputStream(byteSizeValue, new ByteArrayInputStream(data.getBytes(UTF_8))); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java new file mode 100644 index 0000000000000..42c8422af3982 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java @@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.HttpHeaders; +import org.apache.http.HttpResponse; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.concurrent.FutureCallback; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; +import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; +import org.apache.http.nio.reactor.IOReactorException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ScalingExecutorBuilder; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.junit.After; +import org.junit.Before; + +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.HashSet; +import java.util.concurrent.CancellationException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class HttpClientTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = new TestThreadPool( + getTestName(), + new ScalingExecutorBuilder( + UTILITY_THREAD_POOL_NAME, + 1, + 4, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.utility_thread_pool" + ) + ); + } + + @After + public void shutdown() { + terminate(threadPool); + webServer.close(); + } + + public void testSend_MockServerReceivesRequest() throws Exception { + int responseCode = randomIntBetween(200, 203); + String body = randomAlphaOfLengthBetween(2, 8096); + webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); + + String paramKey = randomAlphaOfLength(3); + String paramValue = randomAlphaOfLength(3); + var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); + + try (var httpClient = HttpClient.create(emptyHttpSettings(), threadPool)) { + httpClient.start(); + + PlainActionFuture listener = new PlainActionFuture<>(); + httpClient.send(httpPost, listener); + + var 
result = listener.actionGet(TIMEOUT); + + assertThat(result.response().getStatusLine().getStatusCode(), equalTo(responseCode)); + assertThat(new String(result.body(), StandardCharsets.UTF_8), is(body)); + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), equalTo(httpPost.getURI().getPath())); + assertThat(webServer.requests().get(0).getUri().getQuery(), equalTo(paramKey + "=" + paramValue)); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + } + } + + public void testSend_FailedCallsOnFailure() throws Exception { + var asyncClient = mock(CloseableHttpAsyncClient.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + FutureCallback listener = (FutureCallback) invocation.getArguments()[1]; + listener.failed(new ElasticsearchException("failure")); + return mock(Future.class); + }).when(asyncClient).execute(any(), any()); + + var evictor = createEvictor(threadPool); + var httpPost = createHttpPost(webServer.getPort(), "a", "b"); + + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, evictor, threadPool)) { + client.start(); + + PlainActionFuture listener = new PlainActionFuture<>(); + client.send(httpPost, listener); + + var thrownException = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is("failure")); + } + } + + public void testSend_CancelledCallsOnFailure() throws Exception { + var asyncClient = mock(CloseableHttpAsyncClient.class); + + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + FutureCallback listener = (FutureCallback) invocation.getArguments()[1]; + listener.cancelled(); + return mock(Future.class); + }).when(asyncClient).execute(any(), any()); + + var evictor = createEvictor(threadPool); + var httpPost = createHttpPost(webServer.getPort(), "a", "b"); + + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, evictor, threadPool)) { + client.start(); + + PlainActionFuture listener = new PlainActionFuture<>(); + client.send(httpPost, listener); + + var thrownException = expectThrows(CancellationException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(thrownException.getMessage(), is(format("Request [%s] was cancelled", httpPost.getRequestLine()))); + } + } + + @SuppressWarnings("unchecked") + public void testStart_MultipleCallsOnlyStartTheClientOnce() throws Exception { + var asyncClient = mock(CloseableHttpAsyncClient.class); + when(asyncClient.execute(any(), any())).thenReturn(mock(Future.class)); + + var evictor = createEvictor(threadPool); + var httpPost = createHttpPost(webServer.getPort(), "a", "b"); + + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, evictor, threadPool)) { + client.start(); + + PlainActionFuture listener = new PlainActionFuture<>(); + client.send(httpPost, listener); + client.send(httpPost, listener); + + verify(asyncClient, times(1)).start(); + } + } + + public void testSend_FailsWhenMaxBytesReadIsExceeded() throws Exception { + int responseCode = randomIntBetween(200, 203); + String body = randomAlphaOfLengthBetween(10, 8096); + webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); + + String paramKey = randomAlphaOfLength(3); + String paramValue = randomAlphaOfLength(3); + var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); + + Settings settings = Settings.builder().put(HttpSettings.MAX_HTTP_RESPONSE_SIZE.getKey(), 
ByteSizeValue.ONE).build(); + var httpSettings = createHttpSettings(settings); + + try (var httpClient = HttpClient.create(httpSettings, threadPool)) { + httpClient.start(); + + PlainActionFuture listener = new PlainActionFuture<>(); + httpClient.send(httpPost, listener); + + var throwException = expectThrows(UncategorizedExecutionException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(throwException.getCause().getCause().getMessage(), is("Maximum limit of [1] bytes reached")); + } + } + + private static HttpPost createHttpPost(int port, String paramKey, String paramValue) throws URISyntaxException { + URI uri = new URIBuilder().setScheme("http") + .setHost("localhost") + .setPort(port) + .setPathSegments("/" + randomAlphaOfLength(5)) + .setParameter(paramKey, paramValue) + .build(); + + HttpPost httpPost = new HttpPost(uri); + + ByteArrayEntity byteEntity = new ByteArrayEntity( + randomAlphaOfLength(5).getBytes(StandardCharsets.UTF_8), + ContentType.APPLICATION_JSON + ); + httpPost.setEntity(byteEntity); + + httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType()); + return httpPost; + } + + private static IdleConnectionEvictor createEvictor(ThreadPool threadPool) throws IOReactorException { + var manager = createConnectionManager(); + return new IdleConnectionEvictor(threadPool, manager, new TimeValue(10, TimeUnit.SECONDS), new TimeValue(10, TimeUnit.SECONDS)); + } + + private static PoolingNHttpClientConnectionManager createConnectionManager() throws IOReactorException { + return new PoolingNHttpClientConnectionManager(new DefaultConnectingIOReactor()); + } + + private static HttpSettings emptyHttpSettings() { + return createHttpSettings(Settings.EMPTY); + } + + private static HttpSettings createHttpSettings(Settings settings) { + return new HttpSettings(settings, mockClusterService(settings)); + } + + private static ClusterService mockClusterService(Settings settings) { + var clusterService = mock(ClusterService.class); + + var cSettings = new ClusterSettings(settings, new HashSet<>(HttpSettings.getSettings())); + when(clusterService.getClusterSettings()).thenReturn(cSettings); + + return clusterService; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java new file mode 100644 index 0000000000000..2cc00f35f9af6 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; +import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; +import org.apache.http.nio.reactor.IOReactorException; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ScalingExecutorBuilder; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class IdleConnectionEvictorTests extends ESTestCase { + + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + private ThreadPool threadPool; + + @Before + public void init() { + threadPool = new TestThreadPool( + getTestName(), + new ScalingExecutorBuilder( + UTILITY_THREAD_POOL_NAME, + 1, + 4, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.utility_thread_pool" + ) + ); + } + + @After + public void shutdown() { + terminate(threadPool); + } + + public void testStart_CallsExecutorSubmit() throws IOReactorException { + var mockThreadPool = mock(ThreadPool.class); + + when(mockThreadPool.scheduleWithFixedDelay(any(Runnable.class), any(), any())).thenReturn(mock(Scheduler.Cancellable.class)); + + var evictor = new IdleConnectionEvictor( + mockThreadPool, + createConnectionManager(), + new TimeValue(1, TimeUnit.NANOSECONDS), + new TimeValue(1, TimeUnit.NANOSECONDS) + ); + + evictor.start(); + + verify(mockThreadPool, times(1)).scheduleWithFixedDelay(any(Runnable.class), any(), any()); + } + + public void testStart_OnlyCallsSubmitOnce() throws IOReactorException { + var mockThreadPool = mock(ThreadPool.class); + + when(mockThreadPool.scheduleWithFixedDelay(any(Runnable.class), any(), any())).thenReturn(mock(Scheduler.Cancellable.class)); + + var evictor = new IdleConnectionEvictor( + mockThreadPool, + createConnectionManager(), + new TimeValue(1, TimeUnit.NANOSECONDS), + new TimeValue(1, TimeUnit.NANOSECONDS) + ); + + evictor.start(); + evictor.start(); + + verify(mockThreadPool, times(1)).scheduleWithFixedDelay(any(Runnable.class), any(), any()); + } + + public void testCloseExpiredConnections_IsCalled() throws InterruptedException { + var manager = mock(PoolingNHttpClientConnectionManager.class); + + var evictor = new IdleConnectionEvictor( + threadPool, + manager, + new TimeValue(1, TimeUnit.NANOSECONDS), + new TimeValue(1, TimeUnit.NANOSECONDS) + ); + + CountDownLatch runLatch = new CountDownLatch(1); + doAnswer(invocation -> { + evictor.stop(); + runLatch.countDown(); + return Void.TYPE; + }).when(manager).closeExpiredConnections(); + + evictor.start(); + runLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); + + verify(manager, times(1)).closeExpiredConnections(); + } + + public void testCloseIdleConnections_IsCalled() throws InterruptedException { + var manager = mock(PoolingNHttpClientConnectionManager.class); + + var evictor = new 
IdleConnectionEvictor( + threadPool, + manager, + new TimeValue(1, TimeUnit.NANOSECONDS), + new TimeValue(1, TimeUnit.NANOSECONDS) + ); + + CountDownLatch runLatch = new CountDownLatch(1); + doAnswer(invocation -> { + evictor.stop(); + runLatch.countDown(); + return Void.TYPE; + }).when(manager).closeIdleConnections(anyLong(), any()); + + evictor.start(); + runLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); + + verify(manager, times(1)).closeIdleConnections(anyLong(), any()); + } + + public void testIsRunning_ReturnsTrue() throws IOReactorException { + var evictor = new IdleConnectionEvictor( + threadPool, + createConnectionManager(), + new TimeValue(1, TimeUnit.SECONDS), + new TimeValue(1, TimeUnit.SECONDS) + ); + + evictor.start(); + assertTrue(evictor.isRunning()); + evictor.stop(); + } + + public void testIsRunning_ReturnsFalse() throws IOReactorException { + var evictor = new IdleConnectionEvictor( + threadPool, + createConnectionManager(), + new TimeValue(1, TimeUnit.SECONDS), + new TimeValue(1, TimeUnit.SECONDS) + ); + + evictor.start(); + assertTrue(evictor.isRunning()); + + evictor.stop(); + assertFalse(evictor.isRunning()); + } + + private static PoolingNHttpClientConnectionManager createConnectionManager() throws IOReactorException { + return new PoolingNHttpClientConnectionManager(new DefaultConnectingIOReactor()); + } +} From 27989c308facb5e67719b4329f7824f8d9235678 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Mon, 9 Oct 2023 16:52:17 +0200 Subject: [PATCH 078/176] CCR: Use local cluster state request (#100323) Auto-following indices from the leader asks for the cluster state, waiting for the next version to be available. These requests are no longer forwarded to the master on the leader cluster, since waiting for the state to appear on the coordinator is equally good and avoids burdening the master with serializing a potentially large cluster state.
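For illustration, a minimal sketch of the request shape after this change, using only the builder calls visible in the diff below (the surrounding handler wiring is elided):

    ClusterStateRequest request = new ClusterStateRequest().clear()
        .metadata(true)
        .routingTable(true)
        .local(true) // serve from the coordinating node's applied state instead of forwarding to the leader's master
        .waitForMetadataVersion(metadataVersion)
        .waitForTimeout(waitForMetadataTimeOut);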
--- docs/changelog/100323.yaml | 5 +++++ .../xpack/ccr/action/AutoFollowCoordinator.java | 1 + 2 files changed, 6 insertions(+) create mode 100644 docs/changelog/100323.yaml diff --git a/docs/changelog/100323.yaml b/docs/changelog/100323.yaml new file mode 100644 index 0000000000000..de50da6ec8cf9 --- /dev/null +++ b/docs/changelog/100323.yaml @@ -0,0 +1,5 @@ +pr: 100323 +summary: "CCR: Use local cluster state request" +area: CCR +type: bug +issues: [] diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 11274956f16f1..a7bf572e9bf73 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -303,6 +303,7 @@ void getRemoteClusterState( new ClusterStateRequest().clear() .metadata(true) .routingTable(true) + .local(true) .waitForMetadataVersion(metadataVersion) .waitForTimeout(waitForMetadataTimeOut), e -> handler.accept(null, e), From 9c72933e1f6d06c6fca822c39a2fbd916f35cf83 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 9 Oct 2023 11:10:04 -0400 Subject: [PATCH 079/176] [buildkite] Fix comment trigger regex --- .buildkite/pull-requests.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index d79f02a733b91..456fce6aba519 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -11,7 +11,7 @@ "set_commit_status": false, "build_on_commit": true, "build_on_comment": true, - "trigger_comment_regex": "buildkite\\W+elasticsearch-ci.+", + "trigger_comment_regex": "run\\W+elasticsearch-ci.+", "labels": [ "buildkite-opt-in" ], From d0c263bfa6e85cb82096be7e5e04b4c0a9eb1b3f Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 9 Oct 2023 11:25:20 -0400 Subject: [PATCH 080/176] Add healthcheck for shibboleth-idp in idp-fixture (again) (#100461) --- x-pack/test/idp-fixture/build.gradle | 5 +++++ x-pack/test/idp-fixture/docker-compose.yml | 8 +++++++- x-pack/test/idp-fixture/idp/bin/run-jetty.sh | 17 ++++++++++++++++- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle index 0f5363a278f60..86ed15435ad55 100644 --- a/x-pack/test/idp-fixture/build.gradle +++ b/x-pack/test/idp-fixture/build.gradle @@ -6,6 +6,11 @@ apply plugin: 'elasticsearch.test.fixtures' dockerCompose { composeAdditionalArgs = ['--compatibility'] + + if (System.getenv('BUILDKITE') == 'true') { + // This flag is only available on newer versions of docker-compose, and many Jenkins agents have older versions + upAdditionalArgs = ["--wait"] + } } tasks.named("preProcessFixture").configure { diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml index 11a8ec7a7bb3d..e431fa4ede611 100644 --- a/x-pack/test/idp-fixture/docker-compose.yml +++ b/x-pack/test/idp-fixture/docker-compose.yml @@ -1,4 +1,4 @@ -version: '3.7' +version: "3.7" services: openldap: command: --copy-service --loglevel debug @@ -37,6 +37,12 @@ services: links: - openldap:openldap restart: always #ensure ephemeral port mappings are properly updated + healthcheck: + test: curl -f -s --http0.9 http://localhost:4443 --connect-timeout 10 --max-time 10 --output - > /dev/null + interval: 5s + timeout: 20s + retries: 60 + start_period: 10s 
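      # Presumed rationale for the probe above: 4443 is the IdP's TLS port, so the response to a
      # plain-HTTP request is not valid HTTP/1.x; --http0.9 lets curl accept the raw bytes, reducing
      # the check to "is the port accepting connections", which is all the compose --wait flag needs.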
oidc-provider: build: diff --git a/x-pack/test/idp-fixture/idp/bin/run-jetty.sh b/x-pack/test/idp-fixture/idp/bin/run-jetty.sh index af795963b9712..24ece94c2715d 100644 --- a/x-pack/test/idp-fixture/idp/bin/run-jetty.sh +++ b/x-pack/test/idp-fixture/idp/bin/run-jetty.sh @@ -10,4 +10,19 @@ fi export JETTY_ARGS="jetty.sslContext.keyStorePassword=$JETTY_BROWSER_SSL_KEYSTORE_PASSWORD jetty.backchannel.sslContext.keyStorePassword=$JETTY_BACKCHANNEL_SSL_KEYSTORE_PASSWORD" sed -i "s/^-Xmx.*$/-Xmx$JETTY_MAX_HEAP/g" /opt/shib-jetty-base/start.ini -exec /opt/jetty-home/bin/jetty.sh run +# For some reason, this container always immediately (in less than 1 second) exits with code 0 when starting for the first time +# Even with a health check, docker-compose will immediately report the container as unhealthy when using --wait instead of waiting for it to become healthy +# So, let's just start it a second time if it exits quickly +set +e +start_time=$(date +%s) +/opt/jetty-home/bin/jetty.sh run +exit_code=$? +end_time=$(date +%s) + +duration=$((end_time - start_time)) +if [ $duration -lt 5 ]; then + /opt/jetty-home/bin/jetty.sh run + exit_code=$? +fi + +exit $exit_code From d81dbfa8dad1a74fc500b1496bdb45123f30c6b9 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 9 Oct 2023 16:57:13 +0100 Subject: [PATCH 081/176] Fix race condition in InstrumentsConcurrencyTests (#100518) Fix a race condition between the two threads in InstrumentsConcurrencyTests. If the second thread gets the lock first, the test fails. Fixes #100251 --- .../metrics/InstrumentsConcurrencyTests.java | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/InstrumentsConcurrencyTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/InstrumentsConcurrencyTests.java index 51285894f27ee..4390fd4ac0784 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/InstrumentsConcurrencyTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/InstrumentsConcurrencyTests.java @@ -28,12 +28,13 @@ import static org.hamcrest.Matchers.sameInstance; public class InstrumentsConcurrencyTests extends ESTestCase { - String name = "name"; - String description = "desc"; - String unit = "kg"; - Meter noopMeter = OpenTelemetry.noop().getMeter("noop"); - CountDownLatch registerLatch = new CountDownLatch(1); - Meter lockingMeter = new Meter() { + private final String name = "name"; + private final String description = "desc"; + private final String unit = "kg"; + private final Meter noopMeter = OpenTelemetry.noop().getMeter("noop"); + private final CountDownLatch buildLatch = new CountDownLatch(1); + private final CountDownLatch registerLatch = new CountDownLatch(1); + private final Meter lockingMeter = new Meter() { @Override public LongCounterBuilder counterBuilder(String name) { return new LockingLongCounterBuilder(); @@ -75,6 +76,7 @@ public DoubleCounterBuilder ofDoubles() { @Override public LongCounter build() { try { + buildLatch.countDown(); registerLatch.await(); } catch (Exception e) { throw new RuntimeException(e); @@ -94,6 +96,8 @@ public void testLockingWhenRegistering() throws Exception { var registerThread = new Thread(() -> instruments.registerLongCounter(name, description, unit)); // registerThread has a countDown latch that is simulating a long-running registration registerThread.start(); + buildLatch.await(); // wait for registerThread to hold 
the lock + var setProviderThread = new Thread(() -> instruments.setProvider(noopMeter)); // a setProviderThread will attempt to override a meter, but will wait to acquireLock + setProviderThread.start(); From 4de2ffa2bcf9eba0b8d09fb5f13c217598d025ee Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Mon, 9 Oct 2023 17:03:38 +0100 Subject: [PATCH 082/176] ESQL Ensure that the error delta is positive in PercentileIntGroupingAggregatorFunctionTests (#100517) This commit fixes the usage of an error delta in a test. Hamcrest's closeTo matcher does not like it when we give it a negative delta! --- .../PercentileIntGroupingAggregatorFunctionTests.java | 3 ++- .../compute/operator/ForkingOperatorTestCase.java | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java index 324e05311302e..db8064feafd25 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/PercentileIntGroupingAggregatorFunctionTests.java @@ -58,7 +58,8 @@ protected void assertSimpleGroup(List<Page> input, Block result, int position, L if (td.size() > 0) { double expected = td.quantile(percentile / 100); double value = ((DoubleBlock) result).getDouble(position); - assertThat(value, closeTo(expected, expected * 0.1)); + double errorDelta = Math.abs(expected * 0.1); + assertThat(value, closeTo(expected, errorDelta)); } else { assertTrue(result.isNull(position)); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java index 7edcb786d6596..e9df32282bfeb 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java @@ -129,7 +129,6 @@ public final void testInitialIntermediateFinal() { assertDriverContext(driverContext); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99160") public final void testManyInitialManyPartialFinal() { BigArrays bigArrays = nonBreakingBigArrays(); DriverContext driverContext = driverContext(); From 6d639c6d7e4481f35d411530b3336419ed61faf5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Oct 2023 12:52:23 -0400 Subject: [PATCH 083/176] ESQL: Move AwaitsFix for some issues (#100531) This moves the pointer for the AwaitsFix tag on some of our "heap attack" tests to an issue that we'll cover for 8.12.
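As a sketch of the mechanism (the test name below is hypothetical): `@AwaitsFix` mutes a test while pointing reviewers at a tracking issue, so retargeting a mute is a one-line change per test:

    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100528") // the 8.12 tracking issue
    public void testSomeHeapHungryQuery() throws IOException {
        // body unchanged; the test runner skips it until the annotation is removed
    }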
--- .../xpack/esql/qa/single_node/HeapAttackIT.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java index 03c207c2c211e..49a37840c4fad 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/HeapAttackIT.java @@ -245,7 +245,7 @@ public void testManyEval() throws IOException { assertMap(map, matchesMap().entry("columns", columns).entry("values", hasSize(10_000))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100528") public void testTooManyEval() throws IOException { initManyLongs(); assertCircuitBreaks(() -> manyEval(1000)); @@ -286,7 +286,7 @@ public void testFetchManyBigFields() throws IOException { fetchManyBigFields(100); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100528") public void testFetchTooManyBigFields() throws IOException { initManyBigFieldsIndex(500); assertCircuitBreaks(() -> fetchManyBigFields(500)); @@ -342,7 +342,7 @@ public void testFetchMvLongs() throws IOException { assertMap(map, matchesMap().entry("columns", columns)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100528") public void testFetchTooManyMvLongs() throws IOException { initMvLongsIndex(500, 100, 1000); assertCircuitBreaks(() -> fetchMvLongs()); From b92a2acc5546bc54ed5ebb3ec5661eb48d1efd26 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 9 Oct 2023 11:59:52 -0500 Subject: [PATCH 084/176] Allowing reroute processor yaml rest tests to work with indices or data streams (#100463) --- modules/ingest-common/build.gradle | 2 +- .../test/ingest/310_reroute_processor.yml | 31 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index e48156ef98da1..d7709115b8daa 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -25,7 +25,7 @@ dependencies { restResources { restApi { - include '_common', 'ingest', 'cluster', 'indices', 'index', 'bulk', 'nodes', 'get', 'update', 'cat', 'mget' + include '_common', 'ingest', 'cluster', 'indices', 'index', 'bulk', 'nodes', 'get', 'update', 'cat', 'mget', 'search' } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml index dbdd9b9d7e519..191b92806b6ce 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/310_reroute_processor.yml @@ -46,6 +46,8 @@ teardown: - do: index: index: logs-generic-default + refresh: true + op_type: create id: "1" pipeline: "pipeline-with-two-data-stream-processors" body: { @@ -53,10 +55,13 @@ teardown: } - do: - get: + search: index: 
logs-first-default - id: "1" - - match: { _source.foo: "bar" } + body: + query: + match: {"_id": "1"} + - match: { hits.hits.0._source.foo: "bar" } + --- "Test two stage routing": - skip: @@ -110,21 +115,13 @@ teardown: ] } - match: { acknowledged: true } - - do: - allowed_warnings: - - "index template [logs-nginx] has index patterns [logs-nginx-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-nginx] will take precedence during new index creation" - indices.put_index_template: - name: logs-nginx - body: - index_patterns: [ "logs-nginx-*" ] - template: - settings: - index.default_pipeline: "logs-nginx" - do: index: + refresh: true index: logs-nginx-default id: "example-log" + pipeline: "logs-nginx" op_type: create body: "@timestamp": "2022-04-13" message: "this is an error log" log: level: "error" file: path: "nginx-error.log" - do: - get: + search: index: logs-nginx.error-default - id: "example-log" - - match: { _source.message: "this is an error log" } + body: + query: + match: {"_id": "example-log"} + - match: { hits.hits.0._source.message: "this is an error log" } From 2292406f66ee1b3bd133bb95aaf2c4d3d27d7265 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Oct 2023 13:57:17 -0400 Subject: [PATCH 085/176] ESQL: Fix tests around releasing early (#100534) The test for releasing an `Operation` without fetching its result requires a single input page, because that's the only valid way to drive all operators. To do that, the test forces a single input document. Some tests were not respecting that input request. This fixes those tests. Closes #100496 --- ...edianAbsoluteDeviationDoubleAggregatorFunctionTests.java | 2 +- ...oluteDeviationDoubleGroupingAggregatorFunctionTests.java | 2 +- .../MedianAbsoluteDeviationIntAggregatorFunctionTests.java | 2 +- ...AbsoluteDeviationIntGroupingAggregatorFunctionTests.java | 2 +- .../MedianAbsoluteDeviationLongAggregatorFunctionTests.java | 2 +- ...bsoluteDeviationLongGroupingAggregatorFunctionTests.java | 2 +- .../elasticsearch/compute/operator/OperatorTestCase.java | 6 ++++-- 7 files changed, 10 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java index 86097f547bc65..cf6efe48e33ea 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunctionTests.java @@ -26,7 +26,7 @@ public class MedianAbsoluteDeviationDoubleAggregatorFunctionTests extends Aggreg protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { List values = Arrays.asList(1.2, 1.25, 2.0, 2.0, 4.3, 6.0, 9.0); Randomness.shuffle(values); - return new SequenceDoubleBlockSourceOperator(blockFactory, values); + return new SequenceDoubleBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end))); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java index 445a2a6cca566..20d8dd3b46caf
100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java @@ -43,7 +43,7 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { values.add(Tuple.tuple((long) i, v)); } } - return new LongDoubleTupleBlockSourceOperator(blockFactory, values); + return new LongDoubleTupleBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end))); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java index 3ad2557e2b0b0..681aef76f75ba 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunctionTests.java @@ -26,7 +26,7 @@ public class MedianAbsoluteDeviationIntAggregatorFunctionTests extends Aggregato protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { List values = Arrays.asList(12, 125, 20, 20, 43, 60, 90); Randomness.shuffle(values); - return new SequenceIntBlockSourceOperator(blockFactory, values); + return new SequenceIntBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end))); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java index 45e9d47e67aa9..42664cc14d7e2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntGroupingAggregatorFunctionTests.java @@ -43,7 +43,7 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { values.add(Tuple.tuple((long) i, v)); } } - return new LongIntBlockSourceOperator(blockFactory, values); + return new LongIntBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end))); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java index 465bb5800bbb6..0ba6dc6eb4812 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunctionTests.java @@ -26,7 +26,7 @@ public class MedianAbsoluteDeviationLongAggregatorFunctionTests extends Aggregat protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { List values = Arrays.asList(12L, 125L, 20L, 20L, 43L, 60L, 90L); Randomness.shuffle(values); - return new 
SequenceLongBlockSourceOperator(blockFactory, values); + return new SequenceLongBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end))); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java index 2c6bfc1204591..b53fab2567499 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongGroupingAggregatorFunctionTests.java @@ -43,7 +43,7 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int end) { values.add(Tuple.tuple((long) i, v)); } } - return new TupleBlockSourceOperator(blockFactory, values); + return new TupleBlockSourceOperator(blockFactory, values.subList(0, Math.min(values.size(), end))); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index 2022375c8c774..63f601669636c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -217,8 +217,10 @@ protected final void assertSimple(DriverContext context, int size) { } } - // Tests that finish then close without calling getOutput to retrieve a potential last page, releases all memory - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100496") + /** + * Tests that finish then close without calling {@link Operator#getOutput} to + * retrieve a potential last page, releases all memory. + */ public void testSimpleFinishClose() { DriverContext driverContext = driverContext(); List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), 1)); From bd1c09f3605dfee5473ca48f127a81b47855cbe0 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 9 Oct 2023 11:34:31 -0700 Subject: [PATCH 086/176] Create new Block on filter (#100465) Today, we do not duplicate data for filtered Block/Vector but rather share the underlying data with the existing one. This choice might improve performance since it avoids copying data. However, we have encountered issues due to the lack of clear ownership. We will likely introduce reference counting for Block/Vector. Until that is implemented, this PR duplicates the data for filtered Block/Vector.
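In concrete terms: each `filter(int... positions)` implementation stops returning a storage-sharing wrapper (`new FilterBooleanBlock(this, positions)` and friends) and instead builds a fresh block or vector that owns a copy of the selected positions. The sketch below restates that pattern, lightly condensed and annotated, from the `BooleanArrayBlock` change in this diff; the other element types are generated from the same `X-ArrayBlock.java.st` template and differ only in their accessor and append methods.

    @Override
    public BooleanBlock filter(int... positions) {
        // Copy the selected positions into a brand-new block so the result
        // owns its values and can be released independently of the source.
        try (var builder = blockFactory.newBooleanBlockBuilder(positions.length)) {
            for (int pos : positions) {
                if (isNull(pos)) {
                    builder.appendNull();
                    continue;
                }
                int first = getFirstValueIndex(pos);
                int valueCount = getValueCount(pos);
                if (valueCount == 1) {
                    // Single-valued position: append the one value directly.
                    builder.appendBoolean(getBoolean(first));
                } else {
                    // Multivalued position: copy the whole entry as one group.
                    builder.beginPositionEntry();
                    for (int c = 0; c < valueCount; c++) {
                        builder.appendBoolean(getBoolean(first + c));
                    }
                    builder.endPositionEntry();
                }
            }
            return builder.mvOrdering(mvOrdering()).build();
        }
    }

The try-with-resources is part of the pattern: the builder tracks its bytes against the block factory, so closing it on a failure path releases that memory, while a successful `build()` hands ownership of the copied data to the new block.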
--- x-pack/plugin/esql/compute/build.gradle | 54 ------ .../compute/data/BooleanArrayBlock.java | 21 ++- .../compute/data/BooleanArrayVector.java | 7 +- .../compute/data/BooleanBigArrayVector.java | 8 +- .../compute/data/BooleanBlock.java | 2 +- .../compute/data/BooleanVector.java | 3 +- .../compute/data/BooleanVectorBlock.java | 2 +- .../compute/data/BytesRefArrayBlock.java | 22 ++- .../compute/data/BytesRefArrayVector.java | 8 +- .../compute/data/BytesRefBlock.java | 2 +- .../compute/data/BytesRefVector.java | 2 +- .../compute/data/BytesRefVectorBlock.java | 2 +- .../compute/data/DoubleArrayBlock.java | 21 ++- .../compute/data/DoubleArrayVector.java | 7 +- .../compute/data/DoubleBigArrayVector.java | 6 +- .../compute/data/DoubleBlock.java | 2 +- .../compute/data/DoubleVector.java | 3 +- .../compute/data/DoubleVectorBlock.java | 2 +- .../compute/data/FilterBooleanBlock.java | 140 --------------- .../compute/data/FilterBooleanVector.java | 105 ----------- .../compute/data/FilterBytesRefBlock.java | 143 --------------- .../compute/data/FilterBytesRefVector.java | 106 ----------- .../compute/data/FilterDoubleBlock.java | 140 --------------- .../compute/data/FilterDoubleVector.java | 105 ----------- .../compute/data/FilterIntBlock.java | 140 --------------- .../compute/data/FilterIntVector.java | 105 ----------- .../compute/data/FilterLongBlock.java | 140 --------------- .../compute/data/FilterLongVector.java | 105 ----------- .../compute/data/IntArrayBlock.java | 21 ++- .../compute/data/IntArrayVector.java | 7 +- .../compute/data/IntBigArrayVector.java | 6 +- .../elasticsearch/compute/data/IntBlock.java | 2 +- .../elasticsearch/compute/data/IntVector.java | 2 +- .../compute/data/IntVectorBlock.java | 2 +- .../compute/data/LongArrayBlock.java | 21 ++- .../compute/data/LongArrayVector.java | 7 +- .../compute/data/LongBigArrayVector.java | 6 +- .../elasticsearch/compute/data/LongBlock.java | 2 +- .../compute/data/LongVector.java | 2 +- .../compute/data/LongVectorBlock.java | 2 +- .../compute/data/AbstractBlock.java | 2 +- .../compute/data/AbstractFilterBlock.java | 128 -------------- .../compute/data/AbstractFilterVector.java | 28 --- .../compute/data/ConstantNullBlock.java | 4 +- .../compute/data/X-ArrayBlock.java.st | 24 ++- .../compute/data/X-ArrayVector.java.st | 14 +- .../compute/data/X-BigArrayVector.java.st | 15 +- .../compute/data/X-Block.java.st | 2 +- .../compute/data/X-FilterBlock.java.st | 164 ------------------ .../compute/data/X-FilterVector.java.st | 117 ------------- .../compute/data/X-Vector.java.st | 10 +- .../compute/data/X-VectorBlock.java.st | 2 +- .../compute/operator/FilterOperator.java | 14 +- .../compute/operator/LimitOperator.java | 15 +- .../compute/data/BasicBlockTests.java | 19 +- .../compute/data/BlockAccountingTests.java | 85 +++++---- .../compute/data/FilteredBlockTests.java | 131 ++++++++------ .../compute/operator/FilterOperatorTests.java | 7 - .../compute/operator/LimitOperatorTests.java | 7 - .../function/scalar/conditional/Case.java | 38 ++-- .../function/scalar/nulls/Coalesce.java | 15 +- 61 files changed, 425 insertions(+), 1899 deletions(-) delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java delete mode 100644 
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java delete mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java delete mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterBlock.java delete mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java delete mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st delete mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 7215b53a1a4f0..0fd5b0064fa7d 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -127,33 +127,6 @@ tasks.named('stringTemplates').configure { it.inputFile = bigArrayVectorInputFile it.outputFile = "org/elasticsearch/compute/data/BooleanBigArrayVector.java" } - // filter vectors - File filterVectorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st") - template { - it.properties = intProperties - it.inputFile = filterVectorInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterIntVector.java" - } - template { - it.properties = longProperties - it.inputFile = filterVectorInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterLongVector.java" - } - template { - it.properties = doubleProperties - it.inputFile = filterVectorInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterDoubleVector.java" - } - template { - it.properties = bytesRefProperties - it.inputFile = filterVectorInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterBytesRefVector.java" - } - template { - it.properties = booleanProperties - it.inputFile = filterVectorInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterBooleanVector.java" - } // constant vectors File constantVectorInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st") template { @@ -235,33 +208,6 @@ tasks.named('stringTemplates').configure { it.inputFile = arrayBlockInputFile it.outputFile = "org/elasticsearch/compute/data/BooleanArrayBlock.java" } - // filter blocks - File filterBlockInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st") - template { - it.properties = intProperties - it.inputFile = filterBlockInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterIntBlock.java" - } - template { - it.properties = longProperties - it.inputFile = filterBlockInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterLongBlock.java" - } - template { - it.properties = doubleProperties - it.inputFile = 
filterBlockInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterDoubleBlock.java" - } - template { - it.properties = bytesRefProperties - it.inputFile = filterBlockInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterBytesRefBlock.java" - } - template { - it.properties = booleanProperties - it.inputFile = filterBlockInputFile - it.outputFile = "org/elasticsearch/compute/data/FilterBooleanBlock.java" - } // vector blocks File vectorBlockInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st") template { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index b6e36e698355b..adf1282c21fb0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -51,7 +51,26 @@ public boolean getBoolean(int valueIndex) { @Override public BooleanBlock filter(int... positions) { - return new FilterBooleanBlock(this, positions); + try (var builder = blockFactory.newBooleanBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendBoolean(getBoolean(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendBoolean(getBoolean(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java index 70e54aa81ec11..8ad4196c57997 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java @@ -55,7 +55,12 @@ public boolean isConstant() { @Override public BooleanVector filter(int... positions) { - return new FilterBooleanVector(this, positions); + try (BooleanVector.Builder builder = blockFactory.newBooleanVectorBuilder(positions.length)) { + for (int pos : positions) { + builder.appendBoolean(values[pos]); + } + return builder.build(); + } } public static long ramBytesEstimated(boolean[] values) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java index 86cd106812e5f..13dd594a548ec 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java @@ -60,7 +60,13 @@ public long ramBytesUsed() { @Override public BooleanVector filter(int... 
positions) { - return new FilterBooleanVector(this, positions); + final BitArray filtered = new BitArray(positions.length, blockFactory.bigArrays()); + for (int i = 0; i < positions.length; i++) { + if (values.get(positions[i])) { + filtered.set(i); + } + } + return new BooleanBigArrayVector(filtered, positions.length, blockFactory); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index 632ebdeaa2882..f8f291c2a2e69 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -17,7 +17,7 @@ * Block that stores boolean values. * This class is generated. Do not edit it. */ -public sealed interface BooleanBlock extends Block permits FilterBooleanBlock, BooleanArrayBlock, BooleanVectorBlock { +public sealed interface BooleanBlock extends Block permits BooleanArrayBlock, BooleanVectorBlock { /** * Retrieves the boolean value stored at the given value index. diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java index d5dc9c23d7eee..9a4cc64d760ef 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java @@ -16,8 +16,7 @@ * Vector that stores boolean values. * This class is generated. Do not edit it. */ -public sealed interface BooleanVector extends Vector permits ConstantBooleanVector, FilterBooleanVector, BooleanArrayVector, - BooleanBigArrayVector { +public sealed interface BooleanVector extends Vector permits ConstantBooleanVector, BooleanArrayVector, BooleanBigArrayVector { boolean getBoolean(int position); @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index a464d52482ced..383543f1451ff 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -44,7 +44,7 @@ public ElementType elementType() { @Override public BooleanBlock filter(int... positions) { - return new FilterBooleanVector(vector, positions).asBlock(); + return vector.filter(positions).asBlock(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index db5b5d3fcf804..f46615307f767 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -53,7 +53,27 @@ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { @Override public BytesRefBlock filter(int... 
positions) { - return new FilterBytesRefBlock(this, positions); + final BytesRef scratch = new BytesRef(); + try (var builder = blockFactory.newBytesRefBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendBytesRef(getBytesRef(getFirstValueIndex(pos), scratch)); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendBytesRef(getBytesRef(first + c, scratch)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index 05e82d193f69d..cabe8b86be2bf 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -56,7 +56,13 @@ public boolean isConstant() { @Override public BytesRefVector filter(int... positions) { - return new FilterBytesRefVector(this, positions); + final var scratch = new BytesRef(); + try (BytesRefVector.Builder builder = blockFactory.newBytesRefVectorBuilder(positions.length)) { + for (int pos : positions) { + builder.appendBytesRef(values.get(pos, scratch)); + } + return builder.build(); + } } public static long ramBytesEstimated(BytesRefArray values) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 64ec7347ebeb6..488d3032b2b08 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -18,7 +18,7 @@ * Block that stores BytesRef values. * This class is generated. Do not edit it. */ -public sealed interface BytesRefBlock extends Block permits FilterBytesRefBlock, BytesRefArrayBlock, BytesRefVectorBlock { +public sealed interface BytesRefBlock extends Block permits BytesRefArrayBlock, BytesRefVectorBlock { BytesRef NULL_VALUE = new BytesRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java index 3dd334a9fa71d..82a18f5d5b79e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java @@ -17,7 +17,7 @@ * Vector that stores BytesRef values. * This class is generated. Do not edit it. 
*/ -public sealed interface BytesRefVector extends Vector permits ConstantBytesRefVector, FilterBytesRefVector, BytesRefArrayVector { +public sealed interface BytesRefVector extends Vector permits ConstantBytesRefVector, BytesRefArrayVector { BytesRef getBytesRef(int position, BytesRef dest); @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 3d761c6937c1b..38a2243e1f532 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -45,7 +45,7 @@ public ElementType elementType() { @Override public BytesRefBlock filter(int... positions) { - return new FilterBytesRefVector(vector, positions).asBlock(); + return vector.filter(positions).asBlock(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index 675952a8d6a85..b0d77dd71271e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -51,7 +51,26 @@ public double getDouble(int valueIndex) { @Override public DoubleBlock filter(int... positions) { - return new FilterDoubleBlock(this, positions); + try (var builder = blockFactory.newDoubleBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendDouble(getDouble(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendDouble(getDouble(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java index 9007cee0cd780..69cf686a1576a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java @@ -55,7 +55,12 @@ public boolean isConstant() { @Override public DoubleVector filter(int... 
positions) { - return new FilterDoubleVector(this, positions); + try (DoubleVector.Builder builder = blockFactory.newDoubleVectorBuilder(positions.length)) { + for (int pos : positions) { + builder.appendDouble(values[pos]); + } + return builder.build(); + } } public static long ramBytesEstimated(double[] values) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java index c968116e023cb..20f5a65d8d34c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java @@ -60,7 +60,11 @@ public long ramBytesUsed() { @Override public DoubleVector filter(int... positions) { - return new FilterDoubleVector(this, positions); + final DoubleArray filtered = blockFactory.bigArrays().newDoubleArray(positions.length, true); + for (int i = 0; i < positions.length; i++) { + filtered.set(i, values.get(positions[i])); + } + return new DoubleBigArrayVector(filtered, positions.length, blockFactory); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 7e8c47263630b..c2e63a0c6f384 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -17,7 +17,7 @@ * Block that stores double values. * This class is generated. Do not edit it. */ -public sealed interface DoubleBlock extends Block permits FilterDoubleBlock, DoubleArrayBlock, DoubleVectorBlock { +public sealed interface DoubleBlock extends Block permits DoubleArrayBlock, DoubleVectorBlock { /** * Retrieves the double value stored at the given value index. diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java index e2aaeed94ba6d..545d17004333a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java @@ -16,8 +16,7 @@ * Vector that stores double values. * This class is generated. Do not edit it. */ -public sealed interface DoubleVector extends Vector permits ConstantDoubleVector, FilterDoubleVector, DoubleArrayVector, - DoubleBigArrayVector { +public sealed interface DoubleVector extends Vector permits ConstantDoubleVector, DoubleArrayVector, DoubleBigArrayVector { double getDouble(int position); @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index 5c95bba795017..2c25313e97f29 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -44,7 +44,7 @@ public ElementType elementType() { @Override public DoubleBlock filter(int... 
positions) { - return new FilterDoubleVector(vector, positions).asBlock(); + return vector.filter(positions).asBlock(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java deleted file mode 100644 index fd91e672a7d50..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter block for BooleanBlocks. - * This class is generated. Do not edit it. - */ -final class FilterBooleanBlock extends AbstractFilterBlock implements BooleanBlock { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBooleanBlock.class); - - private final BooleanBlock block; - - FilterBooleanBlock(BooleanBlock block, int... positions) { - super(block, positions); - this.block = block; - } - - @Override - public BooleanVector asVector() { - return null; - } - - @Override - public boolean getBoolean(int valueIndex) { - return block.getBoolean(valueIndex); - } - - @Override - public ElementType elementType() { - return ElementType.BOOLEAN; - } - - @Override - public BooleanBlock filter(int... positions) { - return new FilterBooleanBlock(this, positions); - } - - @Override - public BooleanBlock expand() { - if (false == block.mayHaveMultivaluedFields()) { - return this; - } - /* - * Build a copy of the target block, selecting only the positions - * we've been assigned and expanding all multivalued fields - * into single valued fields. 
- */ - try (BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(positions.length, blockFactory())) { - for (int p : positions) { - if (block.isNull(p)) { - builder.appendNull(); - continue; - } - int start = block.getFirstValueIndex(p); - int end = start + block.getValueCount(p); - for (int i = start; i < end; i++) { - builder.appendBoolean(block.getBoolean(i)); - } - } - return builder.build(); - } - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter blocks encapsulate - // their inner block, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof BooleanBlock that) { - return BooleanBlock.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return BooleanBlock.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount()); - sb.append(", released=" + isReleased()); - if (isReleased() == false) { - sb.append(", values=["); - appendValues(sb); - sb.append("]"); - } - sb.append("]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int p = 0; p < positions; p++) { - if (p > 0) { - sb.append(", "); - } - int start = getFirstValueIndex(p); - int count = getValueCount(p); - if (count == 1) { - sb.append(getBoolean(start)); - continue; - } - sb.append('['); - int end = start + count; - for (int i = start; i < end; i++) { - if (i > start) { - sb.append(", "); - } - sb.append(getBoolean(i)); - } - sb.append(']'); - } - } - - @Override - public void close() { - if (block.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - Releasables.closeExpectNoException(block); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java deleted file mode 100644 index 3ef4d705a1e36..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter vector for BooleanVectors. - * This class is generated. Do not edit it. - */ -public final class FilterBooleanVector extends AbstractFilterVector implements BooleanVector { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBooleanVector.class); - - private final BooleanVector vector; - - private final BooleanBlock block; - - FilterBooleanVector(BooleanVector vector, int... 
positions) { - super(positions, vector.blockFactory()); - this.vector = vector; - this.block = new BooleanVectorBlock(this); - } - - @Override - public boolean getBoolean(int position) { - return vector.getBoolean(mapPosition(position)); - } - - @Override - public BooleanBlock asBlock() { - return block; - } - - @Override - public ElementType elementType() { - return ElementType.BOOLEAN; - } - - @Override - public boolean isConstant() { - return vector.isConstant(); - } - - @Override - public BooleanVector filter(int... positions) { - return new FilterBooleanVector(this, positions); - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter vectors encapsulate - // their inner vector, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof BooleanVector that) { - return BooleanVector.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return BooleanVector.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount() + ", values=["); - appendValues(sb); - sb.append("]]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int i = 0; i < positions; i++) { - if (i > 0) { - sb.append(", "); - } - sb.append(getBoolean(i)); - } - } - - @Override - public BlockFactory blockFactory() { - return vector.blockFactory(); - } - - @Override - public void close() { - Releasables.closeExpectNoException(vector); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java deleted file mode 100644 index e9b15c2318de8..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter block for BytesRefBlocks. - * This class is generated. Do not edit it. - */ -final class FilterBytesRefBlock extends AbstractFilterBlock implements BytesRefBlock { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBytesRefBlock.class); - - private final BytesRefBlock block; - - FilterBytesRefBlock(BytesRefBlock block, int... positions) { - super(block, positions); - this.block = block; - } - - @Override - public BytesRefVector asVector() { - return null; - } - - @Override - public BytesRef getBytesRef(int valueIndex, BytesRef dest) { - return block.getBytesRef(valueIndex, dest); - } - - @Override - public ElementType elementType() { - return ElementType.BYTES_REF; - } - - @Override - public BytesRefBlock filter(int... 
positions) { - return new FilterBytesRefBlock(this, positions); - } - - @Override - public BytesRefBlock expand() { - if (false == block.mayHaveMultivaluedFields()) { - return this; - } - /* - * Build a copy of the target block, selecting only the positions - * we've been assigned and expanding all multivalued fields - * into single valued fields. - */ - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(positions.length, blockFactory())) { - BytesRef scratch = new BytesRef(); - for (int p : positions) { - if (block.isNull(p)) { - builder.appendNull(); - continue; - } - int start = block.getFirstValueIndex(p); - int end = start + block.getValueCount(p); - for (int i = start; i < end; i++) { - BytesRef v = block.getBytesRef(i, scratch); - builder.appendBytesRef(v); - } - } - return builder.build(); - } - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter blocks encapsulate - // their inner block, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof BytesRefBlock that) { - return BytesRefBlock.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return BytesRefBlock.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount()); - sb.append(", released=" + isReleased()); - if (isReleased() == false) { - sb.append(", values=["); - appendValues(sb); - sb.append("]"); - } - sb.append("]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int p = 0; p < positions; p++) { - if (p > 0) { - sb.append(", "); - } - int start = getFirstValueIndex(p); - int count = getValueCount(p); - if (count == 1) { - sb.append(getBytesRef(start, new BytesRef())); - continue; - } - sb.append('['); - int end = start + count; - for (int i = start; i < end; i++) { - if (i > start) { - sb.append(", "); - } - sb.append(getBytesRef(i, new BytesRef())); - } - sb.append(']'); - } - } - - @Override - public void close() { - if (block.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - Releasables.closeExpectNoException(block); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java deleted file mode 100644 index bd3bdfceb338a..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter vector for BytesRefVectors. - * This class is generated. Do not edit it. 
- */ -public final class FilterBytesRefVector extends AbstractFilterVector implements BytesRefVector { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBytesRefVector.class); - - private final BytesRefVector vector; - - private final BytesRefBlock block; - - FilterBytesRefVector(BytesRefVector vector, int... positions) { - super(positions, vector.blockFactory()); - this.vector = vector; - this.block = new BytesRefVectorBlock(this); - } - - @Override - public BytesRef getBytesRef(int position, BytesRef dest) { - return vector.getBytesRef(mapPosition(position), dest); - } - - @Override - public BytesRefBlock asBlock() { - return block; - } - - @Override - public ElementType elementType() { - return ElementType.BYTES_REF; - } - - @Override - public boolean isConstant() { - return vector.isConstant(); - } - - @Override - public BytesRefVector filter(int... positions) { - return new FilterBytesRefVector(this, positions); - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter vectors encapsulate - // their inner vector, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof BytesRefVector that) { - return BytesRefVector.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return BytesRefVector.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount() + ", values=["); - appendValues(sb); - sb.append("]]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int i = 0; i < positions; i++) { - if (i > 0) { - sb.append(", "); - } - sb.append(getBytesRef(i, new BytesRef())); - } - } - - @Override - public BlockFactory blockFactory() { - return vector.blockFactory(); - } - - @Override - public void close() { - Releasables.closeExpectNoException(vector); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java deleted file mode 100644 index 5d2710b0e31c3..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter block for DoubleBlocks. - * This class is generated. Do not edit it. - */ -final class FilterDoubleBlock extends AbstractFilterBlock implements DoubleBlock { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterDoubleBlock.class); - - private final DoubleBlock block; - - FilterDoubleBlock(DoubleBlock block, int... 
positions) { - super(block, positions); - this.block = block; - } - - @Override - public DoubleVector asVector() { - return null; - } - - @Override - public double getDouble(int valueIndex) { - return block.getDouble(valueIndex); - } - - @Override - public ElementType elementType() { - return ElementType.DOUBLE; - } - - @Override - public DoubleBlock filter(int... positions) { - return new FilterDoubleBlock(this, positions); - } - - @Override - public DoubleBlock expand() { - if (false == block.mayHaveMultivaluedFields()) { - return this; - } - /* - * Build a copy of the target block, selecting only the positions - * we've been assigned and expanding all multivalued fields - * into single valued fields. - */ - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(positions.length, blockFactory())) { - for (int p : positions) { - if (block.isNull(p)) { - builder.appendNull(); - continue; - } - int start = block.getFirstValueIndex(p); - int end = start + block.getValueCount(p); - for (int i = start; i < end; i++) { - builder.appendDouble(block.getDouble(i)); - } - } - return builder.build(); - } - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter blocks encapsulate - // their inner block, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof DoubleBlock that) { - return DoubleBlock.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return DoubleBlock.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount()); - sb.append(", released=" + isReleased()); - if (isReleased() == false) { - sb.append(", values=["); - appendValues(sb); - sb.append("]"); - } - sb.append("]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int p = 0; p < positions; p++) { - if (p > 0) { - sb.append(", "); - } - int start = getFirstValueIndex(p); - int count = getValueCount(p); - if (count == 1) { - sb.append(getDouble(start)); - continue; - } - sb.append('['); - int end = start + count; - for (int i = start; i < end; i++) { - if (i > start) { - sb.append(", "); - } - sb.append(getDouble(i)); - } - sb.append(']'); - } - } - - @Override - public void close() { - if (block.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - Releasables.closeExpectNoException(block); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java deleted file mode 100644 index 13421ed2e7025..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter vector for DoubleVectors. - * This class is generated. Do not edit it. - */ -public final class FilterDoubleVector extends AbstractFilterVector implements DoubleVector { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterDoubleVector.class); - - private final DoubleVector vector; - - private final DoubleBlock block; - - FilterDoubleVector(DoubleVector vector, int... positions) { - super(positions, vector.blockFactory()); - this.vector = vector; - this.block = new DoubleVectorBlock(this); - } - - @Override - public double getDouble(int position) { - return vector.getDouble(mapPosition(position)); - } - - @Override - public DoubleBlock asBlock() { - return block; - } - - @Override - public ElementType elementType() { - return ElementType.DOUBLE; - } - - @Override - public boolean isConstant() { - return vector.isConstant(); - } - - @Override - public DoubleVector filter(int... positions) { - return new FilterDoubleVector(this, positions); - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter vectors encapsulate - // their inner vector, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof DoubleVector that) { - return DoubleVector.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return DoubleVector.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount() + ", values=["); - appendValues(sb); - sb.append("]]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int i = 0; i < positions; i++) { - if (i > 0) { - sb.append(", "); - } - sb.append(getDouble(i)); - } - } - - @Override - public BlockFactory blockFactory() { - return vector.blockFactory(); - } - - @Override - public void close() { - Releasables.closeExpectNoException(vector); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java deleted file mode 100644 index 75b14d786d552..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter block for IntBlocks. - * This class is generated. Do not edit it. - */ -final class FilterIntBlock extends AbstractFilterBlock implements IntBlock { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterIntBlock.class); - - private final IntBlock block; - - FilterIntBlock(IntBlock block, int... 
positions) { - super(block, positions); - this.block = block; - } - - @Override - public IntVector asVector() { - return null; - } - - @Override - public int getInt(int valueIndex) { - return block.getInt(valueIndex); - } - - @Override - public ElementType elementType() { - return ElementType.INT; - } - - @Override - public IntBlock filter(int... positions) { - return new FilterIntBlock(this, positions); - } - - @Override - public IntBlock expand() { - if (false == block.mayHaveMultivaluedFields()) { - return this; - } - /* - * Build a copy of the target block, selecting only the positions - * we've been assigned and expanding all multivalued fields - * into single valued fields. - */ - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(positions.length, blockFactory())) { - for (int p : positions) { - if (block.isNull(p)) { - builder.appendNull(); - continue; - } - int start = block.getFirstValueIndex(p); - int end = start + block.getValueCount(p); - for (int i = start; i < end; i++) { - builder.appendInt(block.getInt(i)); - } - } - return builder.build(); - } - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter blocks encapsulate - // their inner block, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof IntBlock that) { - return IntBlock.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return IntBlock.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount()); - sb.append(", released=" + isReleased()); - if (isReleased() == false) { - sb.append(", values=["); - appendValues(sb); - sb.append("]"); - } - sb.append("]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int p = 0; p < positions; p++) { - if (p > 0) { - sb.append(", "); - } - int start = getFirstValueIndex(p); - int count = getValueCount(p); - if (count == 1) { - sb.append(getInt(start)); - continue; - } - sb.append('['); - int end = start + count; - for (int i = start; i < end; i++) { - if (i > start) { - sb.append(", "); - } - sb.append(getInt(i)); - } - sb.append(']'); - } - } - - @Override - public void close() { - if (block.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - Releasables.closeExpectNoException(block); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java deleted file mode 100644 index 994e733333fe1..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter vector for IntVectors. - * This class is generated. 
Do not edit it. - */ -public final class FilterIntVector extends AbstractFilterVector implements IntVector { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterIntVector.class); - - private final IntVector vector; - - private final IntBlock block; - - FilterIntVector(IntVector vector, int... positions) { - super(positions, vector.blockFactory()); - this.vector = vector; - this.block = new IntVectorBlock(this); - } - - @Override - public int getInt(int position) { - return vector.getInt(mapPosition(position)); - } - - @Override - public IntBlock asBlock() { - return block; - } - - @Override - public ElementType elementType() { - return ElementType.INT; - } - - @Override - public boolean isConstant() { - return vector.isConstant(); - } - - @Override - public IntVector filter(int... positions) { - return new FilterIntVector(this, positions); - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter vectors encapsulate - // their inner vector, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof IntVector that) { - return IntVector.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return IntVector.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount() + ", values=["); - appendValues(sb); - sb.append("]]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int i = 0; i < positions; i++) { - if (i > 0) { - sb.append(", "); - } - sb.append(getInt(i)); - } - } - - @Override - public BlockFactory blockFactory() { - return vector.blockFactory(); - } - - @Override - public void close() { - Releasables.closeExpectNoException(vector); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java deleted file mode 100644 index 236049a46b4be..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter block for LongBlocks. - * This class is generated. Do not edit it. - */ -final class FilterLongBlock extends AbstractFilterBlock implements LongBlock { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterLongBlock.class); - - private final LongBlock block; - - FilterLongBlock(LongBlock block, int... 
positions) { - super(block, positions); - this.block = block; - } - - @Override - public LongVector asVector() { - return null; - } - - @Override - public long getLong(int valueIndex) { - return block.getLong(valueIndex); - } - - @Override - public ElementType elementType() { - return ElementType.LONG; - } - - @Override - public LongBlock filter(int... positions) { - return new FilterLongBlock(this, positions); - } - - @Override - public LongBlock expand() { - if (false == block.mayHaveMultivaluedFields()) { - return this; - } - /* - * Build a copy of the target block, selecting only the positions - * we've been assigned and expanding all multivalued fields - * into single valued fields. - */ - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(positions.length, blockFactory())) { - for (int p : positions) { - if (block.isNull(p)) { - builder.appendNull(); - continue; - } - int start = block.getFirstValueIndex(p); - int end = start + block.getValueCount(p); - for (int i = start; i < end; i++) { - builder.appendLong(block.getLong(i)); - } - } - return builder.build(); - } - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter blocks encapsulate - // their inner block, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof LongBlock that) { - return LongBlock.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return LongBlock.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount()); - sb.append(", released=" + isReleased()); - if (isReleased() == false) { - sb.append(", values=["); - appendValues(sb); - sb.append("]"); - } - sb.append("]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int p = 0; p < positions; p++) { - if (p > 0) { - sb.append(", "); - } - int start = getFirstValueIndex(p); - int count = getValueCount(p); - if (count == 1) { - sb.append(getLong(start)); - continue; - } - sb.append('['); - int end = start + count; - for (int i = start; i < end; i++) { - if (i > start) { - sb.append(", "); - } - sb.append(getLong(i)); - } - sb.append(']'); - } - } - - @Override - public void close() { - if (block.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - Releasables.closeExpectNoException(block); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java deleted file mode 100644 index 340699b1f62d4..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter vector for LongVectors. 
- * This class is generated. Do not edit it. - */ -public final class FilterLongVector extends AbstractFilterVector implements LongVector { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterLongVector.class); - - private final LongVector vector; - - private final LongBlock block; - - FilterLongVector(LongVector vector, int... positions) { - super(positions, vector.blockFactory()); - this.vector = vector; - this.block = new LongVectorBlock(this); - } - - @Override - public long getLong(int position) { - return vector.getLong(mapPosition(position)); - } - - @Override - public LongBlock asBlock() { - return block; - } - - @Override - public ElementType elementType() { - return ElementType.LONG; - } - - @Override - public boolean isConstant() { - return vector.isConstant(); - } - - @Override - public LongVector filter(int... positions) { - return new FilterLongVector(this, positions); - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter vectors encapsulate - // their inner vector, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof LongVector that) { - return LongVector.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return LongVector.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount() + ", values=["); - appendValues(sb); - sb.append("]]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int i = 0; i < positions; i++) { - if (i > 0) { - sb.append(", "); - } - sb.append(getLong(i)); - } - } - - @Override - public BlockFactory blockFactory() { - return vector.blockFactory(); - } - - @Override - public void close() { - Releasables.closeExpectNoException(vector); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 4170009b89ab2..97791a03c6044 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -51,7 +51,26 @@ public int getInt(int valueIndex) { @Override public IntBlock filter(int... 
positions) { - return new FilterIntBlock(this, positions); + try (var builder = blockFactory.newIntBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendInt(getInt(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendInt(getInt(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java index 429b31d9519ea..90766a9a67d81 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java @@ -55,7 +55,12 @@ public boolean isConstant() { @Override public IntVector filter(int... positions) { - return new FilterIntVector(this, positions); + try (IntVector.Builder builder = blockFactory.newIntVectorBuilder(positions.length)) { + for (int pos : positions) { + builder.appendInt(values[pos]); + } + return builder.build(); + } } public static long ramBytesEstimated(int[] values) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java index 3d0ae4063f59f..718b07274d366 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java @@ -60,7 +60,11 @@ public long ramBytesUsed() { @Override public IntVector filter(int... positions) { - return new FilterIntVector(this, positions); + final IntArray filtered = blockFactory.bigArrays().newIntArray(positions.length, true); + for (int i = 0; i < positions.length; i++) { + filtered.set(i, values.get(positions[i])); + } + return new IntBigArrayVector(filtered, positions.length, blockFactory); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 32b7024963e87..f27e855809491 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -17,7 +17,7 @@ * Block that stores int values. * This class is generated. Do not edit it. */ -public sealed interface IntBlock extends Block permits FilterIntBlock, IntArrayBlock, IntVectorBlock { +public sealed interface IntBlock extends Block permits IntArrayBlock, IntVectorBlock { /** * Retrieves the int value stored at the given value index. 
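With the Filter* wrappers removed, filter(int... positions) now eagerly copies the selected positions into a fresh vector or block obtained from the same BlockFactory, instead of returning a view over the source. The copy has its own lifecycle and its own memory accounting. A minimal sketch of the resulting contract, using only names visible in this patch (BlockFactory, IntVector.Builder, filter, close); the snippet is illustrative and not part of the change itself:

    // Hypothetical caller: filter() now allocates an independent copy,
    // so the source and the filtered result are released separately.
    try (IntVector.Builder builder = blockFactory.newIntVectorBuilder(3)) {
        builder.appendInt(10);
        builder.appendInt(20);
        builder.appendInt(30);
        try (IntVector source = builder.build();
             IntVector filtered = source.filter(0, 2)) { // copies positions 0 and 2
            assert filtered.getPositionCount() == 2;
            assert filtered.getInt(1) == 30;
        } // closing the source no longer invalidates the filtered copy
    }

This is also why the ramBytesUsed expectations in BlockAccountingTests below flip from between(source, UPPER_BOUND) to lessThan(source): a single-position copy is now smaller than the vector it was filtered from, whereas the old wrappers accounted for the whole wrapped vector.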
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java index 157f7f1406072..6c3b46c3228e6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java @@ -16,7 +16,7 @@ * Vector that stores int values. * This class is generated. Do not edit it. */ -public sealed interface IntVector extends Vector permits ConstantIntVector, FilterIntVector, IntArrayVector, IntBigArrayVector { +public sealed interface IntVector extends Vector permits ConstantIntVector, IntArrayVector, IntBigArrayVector { int getInt(int position); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index d7051d533a13c..e2fc6b3313d6a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -44,7 +44,7 @@ public ElementType elementType() { @Override public IntBlock filter(int... positions) { - return new FilterIntVector(vector, positions).asBlock(); + return vector.filter(positions).asBlock(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 778ec4294180c..dddc5296e471e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -51,7 +51,26 @@ public long getLong(int valueIndex) { @Override public LongBlock filter(int... positions) { - return new FilterLongBlock(this, positions); + try (var builder = blockFactory.newLongBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendLong(getLong(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendLong(getLong(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java index 0bfd53c2f063b..b476556ce27fa 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java @@ -55,7 +55,12 @@ public boolean isConstant() { @Override public LongVector filter(int... 
positions) { - return new FilterLongVector(this, positions); + try (LongVector.Builder builder = blockFactory.newLongVectorBuilder(positions.length)) { + for (int pos : positions) { + builder.appendLong(values[pos]); + } + return builder.build(); + } } public static long ramBytesEstimated(long[] values) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java index ed5f3fdc34549..99179cb6d87e3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java @@ -60,7 +60,11 @@ public long ramBytesUsed() { @Override public LongVector filter(int... positions) { - return new FilterLongVector(this, positions); + final LongArray filtered = blockFactory.bigArrays().newLongArray(positions.length, true); + for (int i = 0; i < positions.length; i++) { + filtered.set(i, values.get(positions[i])); + } + return new LongBigArrayVector(filtered, positions.length, blockFactory); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index dd3a9d79fbaf7..287b55eac3d04 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -17,7 +17,7 @@ * Block that stores long values. * This class is generated. Do not edit it. */ -public sealed interface LongBlock extends Block permits FilterLongBlock, LongArrayBlock, LongVectorBlock { +public sealed interface LongBlock extends Block permits LongArrayBlock, LongVectorBlock { /** * Retrieves the long value stored at the given value index. diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java index de2e51cfda4ea..44e81139adccf 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java @@ -16,7 +16,7 @@ * Vector that stores long values. * This class is generated. Do not edit it. */ -public sealed interface LongVector extends Vector permits ConstantLongVector, FilterLongVector, LongArrayVector, LongBigArrayVector { +public sealed interface LongVector extends Vector permits ConstantLongVector, LongArrayVector, LongBigArrayVector { long getLong(int position); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index e61b24bd0ec78..f8c0d6d1df417 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -44,7 +44,7 @@ public ElementType elementType() { @Override public LongBlock filter(int... 
positions) { - return new FilterLongVector(vector, positions).asBlock(); + return vector.filter(positions).asBlock(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java index 24d2bca58be7e..cbe74c814594d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java @@ -45,7 +45,7 @@ protected AbstractBlock(int positionCount, @Nullable int[] firstValueIndexes, @N this.blockFactory = blockFactory; this.firstValueIndexes = firstValueIndexes; this.nullsMask = nullsMask == null || nullsMask.isEmpty() ? null : nullsMask; - assert (firstValueIndexes == null && this.nullsMask == null) == false; + assert nullsMask != null || firstValueIndexes != null : "Create VectorBlock instead"; } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterBlock.java deleted file mode 100644 index 5f7d637069234..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterBlock.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -import java.util.Arrays; - -abstract class AbstractFilterBlock implements Block { - - protected final int[] positions; - - private final Block block; - - AbstractFilterBlock(Block block, int[] positions) { - this.positions = positions; - this.block = block; - } - - @Override - public ElementType elementType() { - return block.elementType(); - } - - @Override - public boolean isNull(int position) { - return block.isNull(mapPosition(position)); - } - - @Override - public boolean mayHaveNulls() { - return block.mayHaveNulls(); - } - - @Override - public boolean areAllValuesNull() { - return block.areAllValuesNull(); - } - - @Override - public boolean mayHaveMultivaluedFields() { - /* - * This could return a false positive. The block may have multivalued - * fields, but we're not pointing to any of them. That's acceptable. - */ - return block.mayHaveMultivaluedFields(); - } - - @Override - public final int nullValuesCount() { - if (mayHaveNulls() == false) { - return 0; - } else if (areAllValuesNull()) { - return getPositionCount(); - } else { - int nulls = 0; - for (int i = 0; i < getPositionCount(); i++) { - if (isNull(i)) { - nulls++; - } - } - return nulls; - } - } - - @Override - public final int getTotalValueCount() { - if (positions.length == block.getPositionCount()) { - // All the positions are still in the block, just jumbled. - return block.getTotalValueCount(); - } - // TODO this is expensive. maybe cache or something. 
- int total = 0; - for (int p = 0; p < positions.length; p++) { - total += getValueCount(p); - } - return total; - } - - @Override - public final int getValueCount(int position) { - return block.getValueCount(mapPosition(position)); - } - - @Override - public final int getPositionCount() { - return positions.length; - } - - @Override - public final int getFirstValueIndex(int position) { - return block.getFirstValueIndex(mapPosition(position)); - } - - @Override - public MvOrdering mvOrdering() { - return block.mvOrdering(); - } - - @Override - public BlockFactory blockFactory() { - return block.blockFactory(); - } - - @Override - public boolean isReleased() { - return block.isReleased(); - } - - private int mapPosition(int position) { - assert assertPosition(position); - return positions[position]; - } - - @Override - public String toString() { - return "FilteredBlock{" + "positions=" + Arrays.toString(positions) + ", block=" + block + '}'; - } - - protected final boolean assertPosition(int position) { - assert (position >= 0 || position < getPositionCount()) - : "illegal position, " + position + ", position count:" + getPositionCount(); - return true; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java deleted file mode 100644 index c4f9498670ae9..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -/** - * Wraps another vector and only allows access to positions that have not been filtered out. - * - * To ensure fast access, the filter is implemented as an array of positions that map positions in - * the filtered block to positions in the wrapped vector. - */ -abstract class AbstractFilterVector extends AbstractVector { - - protected final int[] positions; - - protected AbstractFilterVector(int[] positions, BlockFactory blockFactory) { - super(positions.length, blockFactory); - this.positions = positions; - } - - protected int mapPosition(int position) { - return positions[position]; - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index 9c00213a33997..dba0ced86e60e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.core.Releasables; import java.io.IOException; import java.util.Objects; @@ -69,8 +68,7 @@ public ElementType elementType() { @Override public Block filter(int... 
positions) { - Releasables.closeExpectNoException(this); - return new ConstantNullBlock(positions.length); + return blockFactory.newConstantNullBlock(positions.length); } public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index ddb0eced039be..1f9fb93bc65c6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -69,7 +69,29 @@ $endif$ @Override public $Type$Block filter(int... positions) { - return new Filter$Type$Block(this, positions); + $if(BytesRef)$ + final BytesRef scratch = new BytesRef(); + $endif$ + try (var builder = blockFactory.new$Type$BlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.append$Type$(get$Type$(getFirstValueIndex(pos)$if(BytesRef)$, scratch$endif$)); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.append$Type$(get$Type$(first + c$if(BytesRef)$, scratch$endif$)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index 53385f6908fe6..07ec2230deee1 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -76,7 +76,19 @@ $endif$ @Override public $Type$Vector filter(int... positions) { - return new Filter$Type$Vector(this, positions); + $if(BytesRef)$ + final var scratch = new BytesRef(); + $endif$ + try ($Type$Vector.Builder builder = blockFactory.new$Type$VectorBuilder(positions.length)) { + for (int pos : positions) { + $if(BytesRef)$ + builder.append$Type$(values.get(pos, scratch)); + $else$ + builder.append$Type$(values[pos]); + $endif$ + } + return builder.build(); + } } public static long ramBytesEstimated($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st index 690a7c9ac89eb..e448d917a65ce 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st @@ -60,7 +60,20 @@ public final class $Type$BigArrayVector extends AbstractVector implements $Type$ @Override public $Type$Vector filter(int... 
positions) { - return new Filter$Type$Vector(this, positions); + $if(boolean)$ + final BitArray filtered = new BitArray(positions.length, blockFactory.bigArrays()); + for (int i = 0; i < positions.length; i++) { + if (values.get(positions[i])) { + filtered.set(i); + } + } + $else$ + final $Type$Array filtered = blockFactory.bigArrays().new$Type$Array(positions.length, true); + for (int i = 0; i < positions.length; i++) { + filtered.set(i, values.get(positions[i])); + } + $endif$ + return new $Type$BigArrayVector(filtered, positions.length, blockFactory); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 596f014eaa577..1dac4f1783e44 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -21,7 +21,7 @@ import java.io.IOException; * Block that stores $type$ values. * This class is generated. Do not edit it. */ -public sealed interface $Type$Block extends Block permits Filter$Type$Block, $Type$ArrayBlock, $Type$VectorBlock { +public sealed interface $Type$Block extends Block permits $Type$ArrayBlock, $Type$VectorBlock { $if(BytesRef)$ BytesRef NULL_VALUE = new BytesRef(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st deleted file mode 100644 index a68b9a9824217..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -$if(BytesRef)$ -import org.apache.lucene.util.BytesRef; -$endif$ -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter block for $Type$Blocks. - * This class is generated. Do not edit it. - */ -final class Filter$Type$Block extends AbstractFilterBlock implements $Type$Block { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Filter$Type$Block.class); - - private final $Type$Block block; - - Filter$Type$Block($Type$Block block, int... positions) { - super(block, positions); - this.block = block; - } - - @Override - public $Type$Vector asVector() { - return null; - } - - @Override -$if(BytesRef)$ - public BytesRef getBytesRef(int valueIndex, BytesRef dest) { - return block.getBytesRef(valueIndex, dest); -$else$ - public $type$ get$Type$(int valueIndex) { - return block.get$Type$(valueIndex); -$endif$ - } - - @Override - public ElementType elementType() { - return ElementType.$TYPE$; - } - - @Override - public $Type$Block filter(int... positions) { - return new Filter$Type$Block(this, positions); - } - - @Override - public $Type$Block expand() { - if (false == block.mayHaveMultivaluedFields()) { - return this; - } - /* - * Build a copy of the target block, selecting only the positions - * we've been assigned and expanding all multivalued fields - * into single valued fields. 
- */ - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(positions.length, blockFactory())) { -$if(BytesRef)$ - BytesRef scratch = new BytesRef(); -$endif$ - for (int p : positions) { - if (block.isNull(p)) { - builder.appendNull(); - continue; - } - int start = block.getFirstValueIndex(p); - int end = start + block.getValueCount(p); - for (int i = start; i < end; i++) { -$if(BytesRef)$ - BytesRef v = block.getBytesRef(i, scratch); - builder.appendBytesRef(v); -$else$ - builder.append$Type$(block.get$Type$(i)); -$endif$ - } - } - return builder.build(); - } - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter blocks encapsulate - // their inner block, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof $Type$Block that) { - return $Type$Block.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return $Type$Block.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount()); - sb.append(", released=" + isReleased()); - if (isReleased() == false) { - sb.append(", values=["); - appendValues(sb); - sb.append("]"); - } - sb.append("]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int p = 0; p < positions; p++) { - if (p > 0) { - sb.append(", "); - } - int start = getFirstValueIndex(p); - int count = getValueCount(p); - if (count == 1) { -$if(BytesRef)$ - sb.append(get$Type$(start, new BytesRef())); -$else$ - sb.append(get$Type$(start)); -$endif$ - continue; - } - sb.append('['); - int end = start + count; - for (int i = start; i < end; i++) { - if (i > start) { - sb.append(", "); - } -$if(BytesRef)$ - sb.append(get$Type$(i, new BytesRef())); -$else$ - sb.append(get$Type$(i)); -$endif$ - } - sb.append(']'); - } - } - - @Override - public void close() { - if (block.isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - Releasables.closeExpectNoException(block); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st deleted file mode 100644 index 814b17483e25a..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.data; - -$if(BytesRef)$ -import org.apache.lucene.util.BytesRef; -$endif$ -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.core.Releasables; - -/** - * Filter vector for $Type$Vectors. - * This class is generated. Do not edit it. 
- */ -public final class Filter$Type$Vector extends AbstractFilterVector implements $Type$Vector { - - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Filter$Type$Vector.class); - - private final $Type$Vector vector; - - private final $Type$Block block; - - Filter$Type$Vector($Type$Vector vector, int... positions) { - super(positions, vector.blockFactory()); - this.vector = vector; - this.block = new $Type$VectorBlock(this); - } - - @Override -$if(BytesRef)$ - public BytesRef getBytesRef(int position, BytesRef dest) { - return vector.getBytesRef(mapPosition(position), dest); -$else$ - public $type$ get$Type$(int position) { - return vector.get$Type$(mapPosition(position)); -$endif$ - } - - @Override - public $Type$Block asBlock() { - return block; - } - - @Override - public ElementType elementType() { - return ElementType.$TYPE$; - } - - @Override - public boolean isConstant() { - return vector.isConstant(); - } - - @Override - public $Type$Vector filter(int... positions) { - return new Filter$Type$Vector(this, positions); - } - - @Override - public long ramBytesUsed() { - // from a usage and resource point of view filter vectors encapsulate - // their inner vector, rather than listing it as a child resource - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof $Type$Vector that) { - return $Type$Vector.equals(this, that); - } - return false; - } - - @Override - public int hashCode() { - return $Type$Vector.hash(this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(this.getClass().getSimpleName()); - sb.append("[positions=" + getPositionCount() + ", values=["); - appendValues(sb); - sb.append("]]"); - return sb.toString(); - } - - private void appendValues(StringBuilder sb) { - final int positions = getPositionCount(); - for (int i = 0; i < positions; i++) { - if (i > 0) { - sb.append(", "); - } -$if(BytesRef)$ - sb.append(get$Type$(i, new BytesRef())); -$else$ - sb.append(get$Type$(i)); -$endif$ - } - } - - @Override - public BlockFactory blockFactory() { - return vector.blockFactory(); - } - - @Override - public void close() { - Releasables.closeExpectNoException(vector); - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index 7e0c5b55fb2a6..90fd30f8b7e64 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -20,15 +20,13 @@ import java.io.IOException; * This class is generated. Do not edit it. 
*/ $if(BytesRef)$ -public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, Filter$Type$Vector, $Type$ArrayVector { +public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, $Type$ArrayVector { $elseif(boolean)$ -public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, Filter$Type$Vector, $Type$ArrayVector, - $Type$BigArrayVector { +public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, $Type$ArrayVector, $Type$BigArrayVector { $elseif(double)$ -public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, Filter$Type$Vector, $Type$ArrayVector, - $Type$BigArrayVector { +public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, $Type$ArrayVector, $Type$BigArrayVector { $else$ -public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, Filter$Type$Vector, $Type$ArrayVector, $Type$BigArrayVector { +public sealed interface $Type$Vector extends Vector permits Constant$Type$Vector, $Type$ArrayVector, $Type$BigArrayVector { $endif$ $if(BytesRef)$ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 3499681dd49b6..57bb09c87e39e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -52,7 +52,7 @@ $endif$ @Override public $Type$Block filter(int... positions) { - return new Filter$Type$Vector(vector, positions).asBlock(); + return vector.filter(positions).asBlock(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java index 1770af60f2252..be4996e129d7b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/FilterOperator.java @@ -71,10 +71,18 @@ protected Page process(Page page) { positions = Arrays.copyOf(positions, rowCount); Block[] filteredBlocks = new Block[page.getBlockCount()]; - for (int i = 0; i < page.getBlockCount(); i++) { - filteredBlocks[i] = page.getBlock(i).filter(positions); + boolean success = false; + try { + for (int i = 0; i < page.getBlockCount(); i++) { + filteredBlocks[i] = page.getBlock(i).filter(positions); + } + success = true; + } finally { + Releasables.closeExpectNoException(page::releaseBlocks); + if (success == false) { + Releasables.closeExpectNoException(filteredBlocks); + } } - return new Page(filteredBlocks); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java index 7507eb8f978c8..a41057386d365 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/LimitOperator.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; import 
org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -92,8 +93,18 @@ public Page getOutput() { filter[i] = i; } Block[] blocks = new Block[lastInput.getBlockCount()]; - for (int b = 0; b < blocks.length; b++) { - blocks[b] = lastInput.getBlock(b).filter(filter); + boolean success = false; + try { + for (int b = 0; b < blocks.length; b++) { + blocks[b] = lastInput.getBlock(b).filter(filter); + } + success = true; + } finally { + Releasables.closeExpectNoException(lastInput::releaseBlocks); + lastInput = null; + if (success == false) { + Releasables.closeExpectNoException(blocks); + } } result = new Page(blocks); limitRemaining = 0; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java index 21e0554d6c187..ef8d33a0148b3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java @@ -746,16 +746,19 @@ public void testToStringSmall() { assertThat(s, containsString("positions=2")); } for (IntBlock block : List.of(intBlock, intVector.asBlock())) { - assertThat(block.filter(0).toString(), containsString("FilterIntVector[positions=1, values=[1]]")); - assertThat(block.filter(1).toString(), containsString("FilterIntVector[positions=1, values=[2]]")); - assertThat(block.filter(0, 1).toString(), containsString("FilterIntVector[positions=2, values=[1, 2]]")); - assertThat(block.filter().toString(), containsString("FilterIntVector[positions=0, values=[]]")); + assertThat(block.filter(0).toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=1]]")); + assertThat(block.filter(1).toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=2]]")); + assertThat( + block.filter(0, 1).toString(), + containsString("IntVectorBlock[vector=IntArrayVector[positions=2, values=[1, 2]]]") + ); + assertThat(block.filter().toString(), containsString("IntVectorBlock[vector=IntArrayVector[positions=0, values=[]]]")); } for (IntVector vector : List.of(intVector, intBlock.asVector())) { - assertThat(vector.filter(0).toString(), containsString("FilterIntVector[positions=1, values=[1]]")); - assertThat(vector.filter(1).toString(), containsString("FilterIntVector[positions=1, values=[2]]")); - assertThat(vector.filter(0, 1).toString(), containsString("FilterIntVector[positions=2, values=[1, 2]]")); - assertThat(vector.filter().toString(), containsString("FilterIntVector[positions=0, values=[]]")); + assertThat(vector.filter(0).toString(), containsString("ConstantIntVector[positions=1, value=1]")); + assertThat(vector.filter(1).toString(), containsString("ConstantIntVector[positions=1, value=2]")); + assertThat(vector.filter(0, 1).toString(), containsString("IntArrayVector[positions=2, values=[1, 2]]")); + assertThat(vector.filter().toString(), containsString("IntArrayVector[positions=0, values=[]]")); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java index 51247d5b04bf6..05f77357b9184 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java @@ 
-20,11 +20,13 @@ import java.util.BitSet; import java.util.Collection; import java.util.Map; +import java.util.stream.IntStream; import static org.apache.lucene.util.RamUsageEstimator.alignObjectSize; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class BlockAccountingTests extends ESTestCase { @@ -47,9 +49,8 @@ public void testBooleanVector() { Vector emptyPlusSome = new BooleanArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + randomData.length))); - // a filter becomes responsible for its enclosing data, both in terms of accountancy and releasability Vector filterVector = emptyPlusSome.filter(1); - assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); } public void testIntVector() { @@ -64,9 +65,8 @@ public void testIntVector() { Vector emptyPlusSome = new IntArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Integer.BYTES * randomData.length))); - // a filter becomes responsible for its enclosing data, both in terms of accountancy and releasability Vector filterVector = emptyPlusSome.filter(1); - assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); } public void testLongVector() { @@ -81,9 +81,8 @@ public void testLongVector() { Vector emptyPlusSome = new LongArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length)); - // a filter becomes responsible for its enclosing data, both in terms of accountancy and releasability Vector filterVector = emptyPlusSome.filter(1); - assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); } public void testDoubleVector() { @@ -100,7 +99,7 @@ public void testDoubleVector() { // a filter becomes responsible for its enclosing data, both in terms of accountancy and releasability Vector filterVector = emptyPlusSome.filter(1); - assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); } public void testBytesRefVector() { @@ -117,27 +116,30 @@ public void testBytesRefVector() { Vector emptyPlusOne = new BytesRefArrayVector(arrayWithOne, 1); assertThat(emptyPlusOne.ramBytesUsed(), between(emptyVector.ramBytesUsed() + bytesRef.length, UPPER_BOUND)); - // a filter becomes responsible for its enclosing data, both in terms of accountancy and releasability - Vector filterVector = emptyPlusOne.filter(1); - assertThat(filterVector.ramBytesUsed(), between(emptyPlusOne.ramBytesUsed(), UPPER_BOUND)); + Vector filterVector = emptyPlusOne.filter(0); + assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); } } // Array Blocks public void testBooleanBlock() { - Block empty = new BooleanArrayBlock(new boolean[] {}, 0,
new int[0], null, Block.MvOrdering.UNORDERED); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new BooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1))); + Block emptyPlusOne = new BooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1) + alignObjectSize(Integer.BYTES))); boolean[] randomData = new boolean[randomIntBetween(1, 1024)]; - Block emptyPlusSome = new BooleanArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + randomData.length))); + int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + Block emptyPlusSome = new BooleanArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + assertThat( + emptyPlusSome.ramBytesUsed(), + is(alignObjectSize(empty.ramBytesUsed() + randomData.length) + alignObjectSize(valueIndices.length * Integer.BYTES)) + ); Block filterBlock = emptyPlusSome.filter(1); - assertThat(filterBlock.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); } public void testBooleanBlockWithNullFirstValues() { @@ -151,15 +153,16 @@ public void testIntBlock() { long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new IntArrayBlock(new int[] { randomInt() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Integer.BYTES))); + Block emptyPlusOne = new IntArrayBlock(new int[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + alignObjectSize(Integer.BYTES) + alignObjectSize(Integer.BYTES))); int[] randomData = new int[randomIntBetween(1, 1024)]; - Block emptyPlusSome = new IntArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Integer.BYTES * randomData.length))); + int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + Block emptyPlusSome = new IntArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + alignObjectSize((long) Integer.BYTES * randomData.length) * 2)); Block filterBlock = emptyPlusSome.filter(1); - assertThat(filterBlock.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); } public void testIntBlockWithNullFirstValues() { @@ -169,19 +172,27 @@ public void testIntBlockWithNullFirstValues() { } public void testLongBlock() { - Block empty = new LongArrayBlock(new long[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + Block empty = new LongArrayBlock(new long[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, 
RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new LongArrayBlock(new long[] { randomInt() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Long.BYTES))); + Block emptyPlusOne = new LongArrayBlock(new long[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Long.BYTES) + alignObjectSize(Integer.BYTES))); long[] randomData = new long[randomIntBetween(1, 1024)]; - Block emptyPlusSome = new LongArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length))); + int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + Block emptyPlusSome = new LongArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + assertThat( + emptyPlusSome.ramBytesUsed(), + is( + alignObjectSize(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length) + alignObjectSize( + (long) valueIndices.length * Integer.BYTES + ) + ) + ); Block filterBlock = emptyPlusSome.filter(1); - assertThat(filterBlock.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); } public void testLongBlockWithNullFirstValues() { @@ -191,19 +202,27 @@ public void testLongBlockWithNullFirstValues() { } public void testDoubleBlock() { - Block empty = new DoubleArrayBlock(new double[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + Block empty = new DoubleArrayBlock(new double[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new DoubleArrayBlock(new double[] { randomInt() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Double.BYTES))); + Block emptyPlusOne = new DoubleArrayBlock(new double[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Double.BYTES) + alignObjectSize(Integer.BYTES))); double[] randomData = new double[randomIntBetween(1, 1024)]; - Block emptyPlusSome = new DoubleArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length))); + int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + Block emptyPlusSome = new DoubleArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + assertThat( + emptyPlusSome.ramBytesUsed(), + is( + alignObjectSize(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length) + alignObjectSize( + valueIndices.length * Integer.BYTES + ) + ) + ); Block filterBlock = emptyPlusSome.filter(1); - assertThat(filterBlock.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); } public void testDoubleBlockWithNullFirstValues() { diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java index 5ab4266023bfe..28721be14f548 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java @@ -26,6 +26,8 @@ import java.util.stream.IntStream; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; @@ -50,10 +52,12 @@ public void testFilterAllPositions() { assertEquals(0, filteredVector.getPositionCount()); expectThrows(ArrayIndexOutOfBoundsException.class, () -> filteredVector.getInt(0)); + filteredVector.close(); var filteredBlock = vector.asBlock().filter(); assertEquals(0, filteredBlock.getPositionCount()); expectThrows(ArrayIndexOutOfBoundsException.class, () -> filteredBlock.getInt(0)); + vector.close(); releaseAndAssertBreaker(filteredBlock); } @@ -66,10 +70,12 @@ public void testKeepAllPositions() { assertEquals(positionCount, filteredVector.getPositionCount()); var anyPosition = randomPosition(positionCount); assertEquals(anyPosition, filteredVector.getInt(anyPosition)); + filteredVector.close(); var filteredBlock = vector.filter(positions).asBlock(); assertEquals(positionCount, filteredBlock.getPositionCount()); assertEquals(anyPosition, filteredBlock.getInt(anyPosition)); + Releasables.close(vector); releaseAndAssertBreaker(filteredBlock); } @@ -83,10 +89,12 @@ public void testKeepSomePositions() { var anyPosition = randomIntBetween(0, (positionCount / 2) - 1); assertEquals(anyPosition * 2, filteredVector.getInt(anyPosition)); assertEquals(anyPosition * 2, filteredVector.asBlock().getInt(anyPosition)); + filteredVector.close(); var filteredBlock = vector.asBlock().filter(positions); assertEquals(positionCount / 2, filteredBlock.getPositionCount()); assertEquals(anyPosition * 2, filteredBlock.getInt(anyPosition)); + vector.close(); releaseAndAssertBreaker(filteredBlock); } @@ -100,6 +108,7 @@ public void testFilterOnFilter() { // TODO: tired of this sv / mv block here. 
d assertEquals(positionCount / 4, filteredTwice.getPositionCount()); var anyPosition = randomIntBetween(0, positionCount / 4 - 1); assertEquals(anyPosition * 4, filteredTwice.getInt(anyPosition)); + Releasables.close(vector, filteredVector); releaseAndAssertBreaker(filteredTwice); } @@ -127,6 +136,7 @@ public void testFilterOnNull() { assertEquals(2, filtered.getTotalValueCount()); assertFalse(filtered.isNull(1)); assertEquals(30, filtered.getInt(filtered.getFirstValueIndex(1))); + Releasables.closeExpectNoException(block); releaseAndAssertBreaker(filtered); } @@ -152,6 +162,7 @@ public void testFilterOnAllNullsBlock() { assertTrue(filtered.areAllValuesNull()); assertEquals(3, filtered.nullValuesCount()); assertEquals(0, filtered.getTotalValueCount()); + block.close(); releaseAndAssertBreaker(filtered); } @@ -178,6 +189,7 @@ public void testFilterOnNoNullsBlock() { assertEquals(20, filtered.asVector().getInt(0)); assertEquals(30, filtered.asVector().getInt(1)); assertEquals(40, filtered.asVector().getInt(2)); + block.close(); releaseAndAssertBreaker(filtered); } @@ -227,60 +239,81 @@ public void testFilterToStringSimple() { var bytesRefVector = new BytesRefArrayVector(bytesRefArray, 4); var bytesRefBlock = new BytesRefArrayBlock(bytesRefArray, 4, null, nulls, randomFrom(Block.MvOrdering.values())); for (Object obj : List.of(bytesRefVector.filter(0, 2), bytesRefVector.asBlock().filter(0, 2), bytesRefBlock.filter(0, 2))) { - String s = obj.toString(); - assertThat(s, containsString("[[31 61], [33 63]]")); - assertThat(s, containsString("positions=2")); + assertThat( + obj.toString(), + either(equalTo("BytesRefArrayVector[positions=2]")).or( + equalTo("BytesRefVectorBlock[vector=BytesRefArrayVector[positions=2]]") + ) + ); } } } public void testFilterToStringMultiValue() { - var bb = blockFactory.newBooleanBlockBuilder(6); - bb.beginPositionEntry().appendBoolean(true).appendBoolean(true).endPositionEntry(); - bb.beginPositionEntry().appendBoolean(false).appendBoolean(false).endPositionEntry(); - bb.beginPositionEntry().appendBoolean(false).appendBoolean(false).endPositionEntry(); - Block filter = bb.build().filter(0, 1); - assertThat(filter.toString(), containsString("[[true, true], [false, false]]")); - assertThat(filter.toString(), containsString("positions=2")); - releaseAndAssertBreaker(filter); - - var ib = blockFactory.newIntBlockBuilder(6); - ib.beginPositionEntry().appendInt(0).appendInt(10).endPositionEntry(); - ib.beginPositionEntry().appendInt(20).appendInt(50).endPositionEntry(); - ib.beginPositionEntry().appendInt(90).appendInt(1000).endPositionEntry(); - filter = ib.build().filter(0, 1); - assertThat(filter.toString(), containsString("[[0, 10], [20, 50]]")); - assertThat(filter.toString(), containsString("positions=2")); - releaseAndAssertBreaker(filter); - - var lb = blockFactory.newLongBlockBuilder(6); - lb.beginPositionEntry().appendLong(0).appendLong(10).endPositionEntry(); - lb.beginPositionEntry().appendLong(20).appendLong(50).endPositionEntry(); - lb.beginPositionEntry().appendLong(90).appendLong(1000).endPositionEntry(); - filter = lb.build().filter(0, 1); - assertThat(filter.toString(), containsString("[[0, 10], [20, 50]]")); - assertThat(filter.toString(), containsString("positions=2")); - releaseAndAssertBreaker(filter); - - var db = blockFactory.newDoubleBlockBuilder(6); - db.beginPositionEntry().appendDouble(0).appendDouble(10).endPositionEntry(); - db.beginPositionEntry().appendDouble(0.002).appendDouble(10e8).endPositionEntry(); - 
db.beginPositionEntry().appendDouble(90).appendDouble(1000).endPositionEntry(); - filter = db.build().filter(0, 1); - assertThat(filter.toString(), containsString("[[0.0, 10.0], [0.002, 1.0E9]]")); - assertThat(filter.toString(), containsString("positions=2")); - releaseAndAssertBreaker(filter); - - assert new BytesRef("1a").toString().equals("[31 61]") && new BytesRef("3c").toString().equals("[33 63]"); - assert new BytesRef("cat").toString().equals("[63 61 74]") && new BytesRef("dog").toString().equals("[64 6f 67]"); - var bytesBlock = blockFactory.newBytesRefBlockBuilder(6); - bytesBlock.beginPositionEntry().appendBytesRef(new BytesRef("1a")).appendBytesRef(new BytesRef("3c")).endPositionEntry(); - bytesBlock.beginPositionEntry().appendBytesRef(new BytesRef("cat")).appendBytesRef(new BytesRef("dog")).endPositionEntry(); - bytesBlock.beginPositionEntry().appendBytesRef(new BytesRef("pig")).appendBytesRef(new BytesRef("chicken")).endPositionEntry(); - filter = bytesBlock.build().filter(0, 1); - assertThat(filter.toString(), containsString("[[[31 61], [33 63]], [[63 61 74], [64 6f 67]]")); - assertThat(filter.toString(), containsString("positions=2")); - releaseAndAssertBreaker(filter); + { + var builder = blockFactory.newBooleanBlockBuilder(6); + builder.beginPositionEntry().appendBoolean(true).appendBoolean(true).endPositionEntry(); + builder.beginPositionEntry().appendBoolean(false).appendBoolean(false).endPositionEntry(); + builder.beginPositionEntry().appendBoolean(false).appendBoolean(false).endPositionEntry(); + BooleanBlock block = builder.build(); + var filter = block.filter(0, 1); + assertThat( + filter.toString(), + containsString("BooleanArrayBlock[positions=2, mvOrdering=UNORDERED, values=[true, true, false, false]]") + ); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + { + var builder = blockFactory.newIntBlockBuilder(6); + builder.beginPositionEntry().appendInt(0).appendInt(10).endPositionEntry(); + builder.beginPositionEntry().appendInt(20).appendInt(50).endPositionEntry(); + builder.beginPositionEntry().appendInt(90).appendInt(1000).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(0, 1); + assertThat(filter.toString(), containsString("IntArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0, 10, 20, 50]]")); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + { + var builder = blockFactory.newLongBlockBuilder(6); + builder.beginPositionEntry().appendLong(0).appendLong(10).endPositionEntry(); + builder.beginPositionEntry().appendLong(20).appendLong(50).endPositionEntry(); + builder.beginPositionEntry().appendLong(90).appendLong(1000).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(0, 1); + assertThat(filter.toString(), containsString("LongArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0, 10, 20, 50]]")); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + { + var builder = blockFactory.newDoubleBlockBuilder(6); + builder.beginPositionEntry().appendDouble(0).appendDouble(10).endPositionEntry(); + builder.beginPositionEntry().appendDouble(0.002).appendDouble(10e8).endPositionEntry(); + builder.beginPositionEntry().appendDouble(90).appendDouble(1000).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(0, 1); + assertThat( + filter.toString(), + containsString("DoubleArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0.0, 10.0, 0.002, 1.0E9]]") + ); + Releasables.close(builder, block); + 
releaseAndAssertBreaker(filter); + } + { + assert new BytesRef("1a").toString().equals("[31 61]") && new BytesRef("3c").toString().equals("[33 63]"); + assert new BytesRef("cat").toString().equals("[63 61 74]") && new BytesRef("dog").toString().equals("[64 6f 67]"); + var builder = blockFactory.newBytesRefBlockBuilder(6); + builder.beginPositionEntry().appendBytesRef(new BytesRef("1a")).appendBytesRef(new BytesRef("3c")).endPositionEntry(); + builder.beginPositionEntry().appendBytesRef(new BytesRef("cat")).appendBytesRef(new BytesRef("dog")).endPositionEntry(); + builder.beginPositionEntry().appendBytesRef(new BytesRef("pig")).appendBytesRef(new BytesRef("chicken")).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(0, 1); + assertThat(filter.toString(), containsString("BytesRefArrayBlock[positions=2, mvOrdering=UNORDERED, values=4]")); + assertThat(filter.getPositionCount(), equalTo(2)); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } } static int randomPosition(int positionCount) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java index da10debb54c7e..fa4c7bea7c9cc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java @@ -124,11 +124,4 @@ protected ByteSizeValue smallEnoughToCircuitBreak() { protected DriverContext driverContext() { // TODO remove this when the parent uses a breaking block factory return breakingDriverContext(); } - - // TODO: remove this once possible - // https://github.com/elastic/elasticsearch/issues/99826 - @Override - protected boolean canLeak() { - return true; - } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java index b5d078754b26d..76f99389a697b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java @@ -97,13 +97,6 @@ public void testNeedInput() { } } - // TODO: remove this once possible - // https://github.com/elastic/elasticsearch/issues/99826 - @Override - protected boolean canLeak() { - return true; - } - public void testBlockBiggerThanRemaining() { BlockFactory blockFactory = driverContext().blockFactory(); for (int i = 0; i < 100; i++) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index 5395f753ce47c..744f9f3815e4e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -209,26 +209,28 @@ public Block.Ref eval(Page page) { Page limited = new Page( IntStream.range(0, page.getBlockCount()).mapToObj(b -> page.getBlock(b).filter(positions)).toArray(Block[]::new) ); - for (ConditionEvaluator condition : conditions) { - try (Block.Ref conditionRef = 
condition.condition.eval(limited)) { - if (conditionRef.block().areAllValuesNull()) { - continue; - } - BooleanBlock b = (BooleanBlock) conditionRef.block(); - if (b.isNull(0)) { - continue; - } - if (false == b.getBoolean(b.getFirstValueIndex(0))) { - continue; - } - try (Block.Ref valueRef = condition.value.eval(limited)) { - result.copyFrom(valueRef.block(), 0, 1); - continue position; + try (Releasable ignored = limited::releaseBlocks) { + for (ConditionEvaluator condition : conditions) { + try (Block.Ref conditionRef = condition.condition.eval(limited)) { + if (conditionRef.block().areAllValuesNull()) { + continue; + } + BooleanBlock b = (BooleanBlock) conditionRef.block(); + if (b.isNull(0)) { + continue; + } + if (false == b.getBoolean(b.getFirstValueIndex(0))) { + continue; + } + try (Block.Ref valueRef = condition.value.eval(limited)) { + result.copyFrom(valueRef.block(), 0, 1); + continue position; + } } } - } - try (Block.Ref elseRef = elseVal.eval(limited)) { - result.copyFrom(elseRef.block(), 0, 1); + try (Block.Ref elseRef = elseVal.eval(limited)) { + result.copyFrom(elseRef.block(), 0, 1); + } } } return Block.Ref.floating(result.build()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index a3f4b6e69648a..93086317be45c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -13,6 +13,7 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; @@ -149,15 +150,17 @@ public Block.Ref eval(Page page) { 1, IntStream.range(0, page.getBlockCount()).mapToObj(b -> page.getBlock(b).filter(positions)).toArray(Block[]::new) ); - for (EvalOperator.ExpressionEvaluator eval : evaluators) { - try (Block.Ref ref = eval.eval(limited)) { - if (false == ref.block().isNull(0)) { - result.copyFrom(ref.block(), 0, 1); - continue position; + try (Releasable ignored = limited::releaseBlocks) { + for (EvalOperator.ExpressionEvaluator eval : evaluators) { + try (Block.Ref ref = eval.eval(limited)) { + if (false == ref.block().isNull(0)) { + result.copyFrom(ref.block(), 0, 1); + continue position; + } } } + result.appendNull(); } - result.appendNull(); } return Block.Ref.floating(result.build()); } From fe37b599a18c29cfb4df250afa21bfd442d1317d Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Mon, 9 Oct 2023 22:27:32 +0200 Subject: [PATCH 087/176] Make test result deterministic (#100539) Add a sort to an ESQL test query. 
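Without the sort, the rows can come back in any order, so `limit 1` could surface a different row on each run. A tiny Java sketch of the same principle (illustrative only, plain JDK, not the test code itself):

```
import java.util.List;

// Illustrative sketch, not ESQL code: an unordered "take one" may pick any
// element, while sorting first pins the answer.
public class DeterministicPick {
    public static void main(String[] args) {
        List<Integer> ids = List.of(3, 1, 2);
        // findAny() on an unordered parallel stream is allowed to return any element:
        ids.parallelStream().unordered().findAny().ifPresent(System.out::println);
        // Sorting first makes the result stable, like `sort emp_no` before `limit 1`:
        ids.stream().sorted().findFirst().ifPresent(System.out::println); // always 1
    }
}
```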
--- .../qa/testFixtures/src/main/resources/ints.csv-spec | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index aba756587f223..68ed0319047fd 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -473,17 +473,15 @@ ROW deg = [90, 180, 270] [90, 180, 270] | [1.5707963267948966, 3.141592653589793, 4.71238898038469] ; -// AwaitsFix: https://github.com/elastic/elasticsearch/issues/100163 -warningWithFromSource-Ignore -from employees | eval x = to_long(emp_no) * 10000000 | eval y = to_int(x) > 1 | keep y | limit 1; -warning:Line 1:65: evaluation of [to_int(x)] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:65: org.elasticsearch.xpack.ql.QlIllegalArgumentException: [100010000000] out of [integer] range +warningWithFromSource +from employees | sort emp_no | limit 1 | eval x = to_long(emp_no) * 10000000 | eval y = to_int(x) > 1 | keep y; +warning:Line 1:89: evaluation of [to_int(x)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 1:89: org.elasticsearch.xpack.ql.QlIllegalArgumentException: [100010000000] out of [integer] range y:boolean null ; -// AwaitsFix: https://github.com/elastic/elasticsearch/issues/100163 // the test is also notable through having the "failing" operation in the filter, which will be part of the fragment sent to a data node multipleWarnings-Ignore from employees | sort emp_no | eval x = to_long(emp_no) * 10000000 | where to_int(x) > 1 | keep x | limit 1; From d2fa1ee4f10154fb61af8d95a50c17d7990c4afe Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Oct 2023 16:27:52 -0400 Subject: [PATCH 088/176] ESQL: Don't log the parse tree (#100545) ESQL would log the ANTLR parse tree of its statements.
These are kind of unreadable: ``` {"@timestamp":"2023-10-09T15:13:43.921Z", "log.level":"DEBUG", "message":"Parse tree: ([] ([94] ([2 94] ([2 2 94] ([2 2 2 94] ([2 2 2 2 94] ([2 2 2 2 2 94] ([2 2 2 2 2 2 94] ([98 2 2 2 2 2 2 94] ([109 98 2 2 2 2 2 2 94] from ([256 109 98 2 2 2 2 2 2 94] employees)))) | ([102 2 2 2 2 2 94] ([118 102 2 2 2 2 2 94] sort ([364 118 102 2 2 2 2 2 94] ([372 364 118 102 2 2 2 2 2 94] ([135 372 364 118 102 2 2 2 2 2 94] ([189 135 372 364 118 102 2 2 2 2 2 94] ([197 189 135 372 364 118 102 2 2 2 2 2 94] ([214 197 189 135 372 364 118 102 2 2 2 2 2 94] ([306 214 197 189 135 372 364 118 102 2 2 2 2 2 94] emp_no)))))))))) | ([102 2 2 2 2 94] ([120 102 2 2 2 2 94] where ([130 120 102 2 2 2 2 94] ([135 130 120 102 2 2 2 2 94] ([190 135 130 120 102 2 2 2 2 94] ([197 190 135 130 120 102 2 2 2 2 94] ([215 197 190 135 130 120 102 2 2 2 2 94] ([222 215 197 190 135 130 120 102 2 2 2 2 94] mv_count) ( ([225 215 197 190 135 130 120 102 2 2 2 2 94] ([135 225 215 197 190 135 130 120 102 2 2 2 2 94] ([189 135 225 215 197 190 135 130 120 102 2 2 2 2 94] ([197 189 135 225 215 197 190 135 130 120 102 2 2 2 2 94] ([214 197 189 135 225 215 197 190 135 130 120 102 2 2 2 2 94] ([306 214 197 189 135 225 215 197 190 135 130 120 102 2 2 2 2 94] job_positions)))))) )))) ([191 135 130 120 102 2 2 2 2 94] <=) ([192 135 130 120 102 2 2 2 2 94] ([197 192 135 130 120 102 2 2 2 2 94] ([213 197 192 135 130 120 102 2 2 2 2 94] ([321 213 197 192 135 130 120 102 2 2 2 2 94] 1))))))))) | ([102 2 2 2 94] ([120 102 2 2 2 94] where ([130 120 102 2 2 2 94] ([135 130 120 102 2 2 2 94] ([190 135 130 120 102 2 2 2 94] ([197 190 135 130 120 102 2 2 2 94] ([214 197 190 135 130 120 102 2 2 2 94] ([306 214 197 190 135 130 120 102 2 2 2 94] emp_no)))) ([191 135 130 120 102 2 2 2 94] >=) ([192 135 130 120 102 2 2 2 94] ([197 192 135 130 120 102 2 2 2 94] ([213 197 192 135 130 120 102 2 2 2 94] ([321 213 197 192 135 130 120 102 2 2 2 94] 10024))))))))) | ([102 2 2 94] ([116 102 2 2 94] limit 3))) | ([102 2 94] ([117 102 2 94] keep ([381 117 102 2 94] emp_no) , ([383 117 102 2 94] job_positions)))) | ([102 94] ([114 102 94] eval ([280 114 102 94] ([240 280 114 102 94] ([249 240 280 114 102 94] ([306 249 240 280 114 102 94] is_in)) = ([251 240 280 114 102 94] ([137 251 240 280 114 102 94] ([189 137 251 240 280 114 102 94] ([197 189 137 251 240 280 114 102 94] ([214 197 189 137 251 240 280 114 102 94] ([306 214 197 189 137 251 240 280 114 102 94] job_positions))))) in ( ([143 251 240 280 114 102 94] ([189 143 251 240 280 114 102 94] ([197 189 143 251 240 280 114 102 94] ([213 197 189 143 251 240 280 114 102 94] ([324 213 197 189 143 251 240 280 114 102 94] \"Accountant\"))))) , ([145 251 240 280 114 102 94] ([189 145 251 240 280 114 102 94] ([197 189 145 251 240 280 114 102 94] ([213 197 189 145 251 240 280 114 102 94] ([324 213 197 189 145 251 240 280 114 102 94] \"Internship\"))))) , ([145 251 240 280 114 102 94] ([189 145 251 240 280 114 102 94] ([197 189 145 251 240 280 114 102 94] ([213 197 189 145 251 240 280 114 102 94] null)))) ))))))) )", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[javaRestTest-0][esql][T#8]","log.logger":"org.elasticsearch.xpack.esql.parser.EsqlParser","elasticsearch.cluster.uuid":"CqmNvJFtTdWUGNY9m-7ccw","elasticsearch.node.id":"njbEEK7jTqiZned-iIMKuQ","elasticsearch.node.name":"javaRestTest-0","elasticsearch.cluster.name":"javaRestTest"} ``` See? 
That's useful if you are debugging ANTLR, but unless you really need it, it's kind of in the way. And, if the query is really big, it's like an 80mb single line string. Oooof. Bad times. So! This moves that log from `debug` to `trace` and turns off trace level logging in esql, which I'd left on accidentally a long while back. --- distribution/src/config/log4j2.properties | 3 --- .../java/org/elasticsearch/xpack/esql/parser/EsqlParser.java | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 0d2df5e3ba2e1..36b5b03d9a110 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -52,9 +52,6 @@ appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumul appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB ################################################ -logger.esql.name = org.elasticsearch.xpack.esql -logger.esql.level = trace - rootLogger.level = info rootLogger.appenderRef.console.ref = console rootLogger.appenderRef.rolling.ref = rolling diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java index 2f527c54430a7..8baee4be14914 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java @@ -72,8 +72,8 @@ private T invokeParser( ParserRuleContext tree = parseFunction.apply(parser); - if (log.isDebugEnabled()) { - log.debug("Parse tree: {}", tree.toStringTree()); + if (log.isTraceEnabled()) { + log.trace("Parse tree: {}", tree.toStringTree()); } return result.apply(new AstBuilder(paramTokens), tree); From 22f9eaac5568b168990036a287b0612b9a3591b9 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Mon, 9 Oct 2023 23:35:18 +0300 Subject: [PATCH 089/176] ESQL: Remove unnecessary AwaitsFix (#100543) Update the tests and remove an AwaitsFix --- .../xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 80fd51cacd163..63f6b39c8077c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -296,7 +296,6 @@ public void testAnotherCountAllWithFilter() { assertThat(expected.toString(), is(esStatsQuery.query().toString())); } - @AwaitsFix(bugUrl = "intermediateAgg does proper reduction but the agg itself does not - the optimizer needs to improve") public void testMultiCountAllWithFilter() { var plan = plan(""" from test @@ -311,7 +310,7 @@ public void testMultiCountAllWithFilter() { var exchange = as(agg.child(), ExchangeExec.class); var esStatsQuery = as(exchange.child(), EsStatsQueryExec.class); assertThat(esStatsQuery.limit(), is(nullValue())); - assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen")); + assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen", "count", "seen", "count", "seen")); var expected =
wrapWithSingleQuery(QueryBuilders.rangeQuery("emp_no").gt(10010), "emp_no"); assertThat(expected.toString(), is(esStatsQuery.query().toString())); } From 3b3168b1180c9746093e706abff7c596499ad502 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 9 Oct 2023 12:36:30 -0700 Subject: [PATCH 090/176] Use last good commit when updating serverless submodule --- .../scripts/update-serverless-submodule.sh | 17 +++++++++++++++++ .buildkite/update-es-serverless.yml | 8 ++------ 2 files changed, 19 insertions(+), 6 deletions(-) create mode 100755 .buildkite/scripts/update-serverless-submodule.sh diff --git a/.buildkite/scripts/update-serverless-submodule.sh b/.buildkite/scripts/update-serverless-submodule.sh new file mode 100755 index 0000000000000..bd2c6d7a5f793 --- /dev/null +++ b/.buildkite/scripts/update-serverless-submodule.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -euo pipefail + +INTAKE_PIPELINE_SLUG="elasticsearch-intake" +BUILD_JSON=$(curl -sH "Authorization: Bearer ${BUILDKITE_API_TOKEN}" "https://api.buildkite.com/v2/organizations/elastic/pipelines/${INTAKE_PIPELINE_SLUG}/builds?branch=main&state=passed&per_page=1" | jq '.[0] | {commit: .commit, url: .web_url}') +LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') + +cat < Date: Mon, 9 Oct 2023 21:57:14 +0100 Subject: [PATCH 091/176] Fix test (#100547) This commit fixes the intermittent failure of BreakingBytesRefBuilderTests. The issue is that the test uses the BreakingBytesRefBuilder's internal BytesRef array length to determine whether to expect a circuit breaker exception or not, where it should instead use the builder's length (not the capacity, a.k.a. the internal BytesRef array length). The test failed intermittently about one in 10-20 runs before the change. The test passes successfully 100s of thousands of times with the fix.
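To make the capacity-versus-length distinction concrete, here is a minimal sketch with a toy builder (a hypothetical stand-in for BreakingBytesRefBuilder, plain JDK only, not the real class). Because the backing array grows with headroom, sizing the next resize from the array's length (the capacity) over-estimates it:

```
import java.util.Arrays;

// Toy growable buffer; a hypothetical stand-in, not the real BreakingBytesRefBuilder.
class ToyBuilder {
    byte[] bytes = new byte[8]; // capacity: usually oversized after a grow
    int length = 0;             // bytes actually written

    void append(byte[] data) {
        if (length + data.length > bytes.length) {
            // grow with headroom, so the capacity overshoots the written length
            bytes = Arrays.copyOf(bytes, Math.max(bytes.length * 2, length + data.length));
        }
        System.arraycopy(data, 0, bytes, length, data.length);
        length += data.length;
    }

    public static void main(String[] args) {
        ToyBuilder b = new ToyBuilder();
        b.append(new byte[10]); // grows: capacity is now 16, length is 10
        int next = 3;
        // Sizing the next resize from the capacity over-estimates the target...
        System.out.println("from capacity: " + (b.bytes.length + next)); // 19
        // ...while the builder itself grows based on the written length:
        System.out.println("from length:   " + (b.length + next));       // 13
    }
}
```

Since the capacity depends on how earlier grows were oversized, an expectation derived from it disagrees with the real allocation only for some random inputs, which matches the intermittent failure described above.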
closes #99649 --- .../compute/operator/BreakingBytesRefBuilderTests.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java index 3f03823815249..24f5297a0d6fe 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/BreakingBytesRefBuilderTests.java @@ -51,7 +51,6 @@ public void applyToOracle(BytesRefBuilder oracle) { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99649") public void testAddBytesRef() { testAgainstOracle(() -> new TestIteration() { BytesRef ref = new BytesRef(randomAlphaOfLengthBetween(1, 100)); @@ -73,7 +72,6 @@ public void applyToOracle(BytesRefBuilder oracle) { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99649") public void testGrow() { testAgainstOracle(() -> new TestIteration() { int length = between(1, 100); @@ -123,7 +121,7 @@ private void testAgainstOracle(Supplier iterations) { boolean willResize = builder.length() + iteration.size() >= builder.bytes().length; if (willResize) { long resizeMemoryUsage = BreakingBytesRefBuilder.SHALLOW_SIZE + ramForArray(builder.bytes().length); - resizeMemoryUsage += ramForArray(ArrayUtil.oversize(builder.bytes().length + iteration.size(), Byte.BYTES)); + resizeMemoryUsage += ramForArray(ArrayUtil.oversize(builder.length() + iteration.size(), Byte.BYTES)); if (resizeMemoryUsage > limit) { Exception e = expectThrows(CircuitBreakingException.class, () -> iteration.applyToBuilder(builder)); assertThat(e.getMessage(), equalTo("over test limit")); From cab1429c72704bc3ba4f44e2e0d132d0678d0d35 Mon Sep 17 00:00:00 2001 From: Chris Cressman Date: Mon, 9 Oct 2023 17:11:39 -0400 Subject: [PATCH 092/176] [DOCS] Update links to migrating Search docs (#100237) Some Search docs are migrating from Enterprise Search to Elasticsearch. Update links to these docs to use the new locations. Also update titles and surrounding text. --- .../apis/post-analytics-collection-event.asciidoc | 2 +- docs/reference/ingest.asciidoc | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc index d20134644dde4..84d9cb5351799 100644 --- a/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc +++ b/docs/reference/behavioral-analytics/apis/post-analytics-collection-event.asciidoc @@ -40,8 +40,8 @@ Post an event to an Analytics Collection. [[post-analytics-collection-event-request-body]] ==== {api-request-body-title} -Full request body parameters can be found in {enterprise-search-ref}/analytics-events-reference.html[Enterprise Analytics Events^]. +Full request body parameters can be found in: <>. [[post-analytics-collection-event-prereqs]] ==== {api-prereq-title} diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index c0ef4d852b84d..ddba7c4e775ce 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -448,11 +448,11 @@ configuration. 
See {fleet-guide}/install-standalone-elastic-agent.html[Install s [discrete] [[pipelines-in-enterprise-search]] -=== Pipelines in Enterprise Search +=== Pipelines for search indices -When you create Elasticsearch indices for {enterprise-search-ref}/index.html[Enterprise Search^] use cases, for example, using the {enterprise-search-ref}/crawler.html[web crawler^] or {enterprise-search-ref}/connectors.html[connectors^], these indices are automatically set up with specific ingest pipelines. +When you create Elasticsearch indices for search use cases, for example, using the {enterprise-search-ref}/crawler.html[web crawler^] or {enterprise-search-ref}/connectors.html[connectors^], these indices are automatically set up with specific ingest pipelines. These processors help optimize your content for search. -Refer to the {enterprise-search-ref}/ingest-pipelines.html[Enterprise Search documentation^] for more information. +See <> for more information. [discrete] [[access-source-fields]] From 4eb72d358e19977806d2c09521b3d8963b00628f Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Mon, 9 Oct 2023 14:47:13 -0700 Subject: [PATCH 093/176] Fix build failures when setting RUNTIME_JAVA_HOME (#100554) Follow up to #99922. Removing `org.gradle.java.installations.fromEnv` from our `gradle.properties` file had the side effect of breaking the `RUNTIME_JAVA_HOME` environment variable since Gradle is unaware of this toolchain. We need to add `RUNTIME_JAVA_HOME` back here so that setting this environment variable still works and to avoid failures like this one: https://gradle-enterprise.elastic.co/s/gaezgaglsn76o/failure?expanded-stacktrace=WyIwLTEtMiJd#1 --- gradle.properties | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gradle.properties b/gradle.properties index 7ad5f24829cc0..64cb394206e66 100644 --- a/gradle.properties +++ b/gradle.properties @@ -15,3 +15,6 @@ org.gradle.java.installations.auto-detect=false # log some dependency verification info to console org.gradle.dependency.verification.console=verbose + +# allow user to specify toolchain via the RUNTIME_JAVA_HOME environment variable +org.gradle.java.installations.fromEnv=RUNTIME_JAVA_HOME From 1f85ad561b67a9426e4f9ff819ca9df39940b098 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 9 Oct 2023 17:27:42 -0500 Subject: [PATCH 094/176] Updating ingest-common rest tests to run in new rest test framework (#100537) --- modules/ingest-common/build.gradle | 8 ++++-- .../IngestCommonClientYamlTestSuiteIT.java | 25 +++++++++++++++++++ 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index d7709115b8daa..90d52de6f0fff 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1.
*/ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -29,6 +29,10 @@ restResources { } } +tasks.named('yamlRestTest') { + usesDefaultDistribution() +} + testClusters.configureEach { // Needed in order to test ingest pipeline templating: // (this is because the integTest node is not using default distribution, but only the minimal number of required modules) diff --git a/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java b/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java index fa9e4d32000f7..889fa0b101c06 100644 --- a/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java +++ b/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java @@ -11,8 +11,14 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class IngestCommonClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -24,4 +30,23 @@ public IngestCommonClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate t public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", new SecureString("x-pack-test-password")); + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); + } + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "true") + .user("x_pack_rest_user", "x-pack-test-password") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } From 878127ce7732e2ddab7c06fda375f3ef24b327f2 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 9 Oct 2023 17:29:30 -0500 Subject: [PATCH 095/176] Avoiding using the _template API in an ingest-common yaml rest test (#100540) --- .../rest-api-spec/test/ingest/230_change_target_index.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml @@ -102,12 +102,13 @@ teardown: "Test
Change Target Index with Default Pipeline": - do: - indices.put_template: + indices.put_index_template: name: index_template body: index_patterns: test - settings: - default_pipeline: "retarget" + template: + settings: + default_pipeline: "retarget" - do: ingest.put_pipeline: From bf145309be844c33829367dff6991f5e0c0709fd Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 10 Oct 2023 11:42:33 +1100 Subject: [PATCH 096/176] Additional trace logging for Native Role Mappings (#100046) Adds trace logging in a couple of places in `NativeRoleMappingStore` to assist in debugging the sequence of changes from role mapping update (e.g. via `ReservedRoleMappingAction`) and the resolution of user access --- .../mapper/NativeRoleMappingStore.java | 27 +++++++++---------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 18d9070d08a33..218e120e30941 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -136,21 +136,17 @@ protected void loadMappings(ActionListener> listener ScrollHelper.fetchAllByEntity( client, request, - new ContextPreservingActionListener<>( - supplier, - ActionListener.wrap( - (Collection mappings) -> listener.onResponse( - mappings.stream().filter(Objects::nonNull).toList() - ), - ex -> { - logger.error( - () -> format("failed to load role mappings from index [%s] skipping all mappings.", SECURITY_MAIN_ALIAS), - ex - ); - listener.onResponse(Collections.emptyList()); - } - ) - ), + new ContextPreservingActionListener<>(supplier, ActionListener.wrap((Collection mappings) -> { + final List mappingList = mappings.stream().filter(Objects::nonNull).toList(); + logger.debug("successfully loaded [{}] role-mapping(s) from [{}]", mappingList.size(), securityIndex.aliasName()); + listener.onResponse(mappingList); + }, ex -> { + logger.error( + () -> format("failed to load role mappings from index [%s] skipping all mappings.", SECURITY_MAIN_ALIAS), + ex + ); + listener.onResponse(Collections.emptyList()); + })), doc -> buildMapping(getNameFromId(doc.getId()), doc.getSourceRef()) ); } @@ -202,6 +198,7 @@ private void modifyMapping( ); } else { try { + logger.trace("Modifying role mapping [{}] for [{}]", name, request.getClass().getSimpleName()); inner.accept(request, ActionListener.wrap(r -> refreshRealms(listener, r), listener::onFailure)); } catch (Exception e) { logger.error(() -> "failed to modify role-mapping [" + name + "]", e); From 1ba7c09fec8f601ea2d8d70855a36c8790a42d1f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Oct 2023 21:50:47 -0400 Subject: [PATCH 097/176] ESQL: Make test less precise (#100556) This changes an assertion in a test for median absolute deviation that hit floating point rounding errors. It happens fairly rarely, maybe 2% of the time, but is reproducible with the right key.
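As background, floating point sums depend on evaluation order, so an exact `equalTo` match can fail even when both sides compute "the same" value. A minimal Hamcrest sketch of the pattern the fix switches to (a plain `main` method rather than the actual test class; assumes only Hamcrest on the classpath):

```
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.closeTo;

// Sketch only: shows the relative-tolerance pattern, not the real test.
public class RelativeToleranceSketch {
    public static void main(String[] args) {
        double expected = 0.1 + 0.2 + 0.3; // one evaluation order: 0.6000000000000001
        double actual = 0.3 + 0.2 + 0.1;   // another order: 0.6
        // assertThat(actual, equalTo(expected)); // would throw: off by one ULP
        assertThat(actual, closeTo(expected, Math.abs(expected) * 1e-6));
    }
}
```

`closeTo`'s second argument is an absolute error bound, so scaling it by the expected value, as the diff below does with `medianAbsoluteDeviation * .000001`, turns it into a relative tolerance.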
--- ...uteDeviationDoubleGroupingAggregatorFunctionTests.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java index 20d8dd3b46caf..8ea4f34ff0d50 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests.java @@ -23,7 +23,7 @@ import java.util.stream.Collectors; import java.util.stream.DoubleStream; -import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.closeTo; public class MedianAbsoluteDeviationDoubleGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { @@ -58,10 +58,8 @@ protected String expectedDescriptionOfAggregator() { @Override protected void assertSimpleGroup(List input, Block result, int position, Long group) { - assertThat( - ((DoubleBlock) result).getDouble(position), - equalTo(medianAbsoluteDeviation(input.stream().flatMapToDouble(p -> allDoubles(p, group)))) - ); + double medianAbsoluteDeviation = medianAbsoluteDeviation(input.stream().flatMapToDouble(p -> allDoubles(p, group))); + assertThat(((DoubleBlock) result).getDouble(position), closeTo(medianAbsoluteDeviation, medianAbsoluteDeviation * .000001)); } static double medianAbsoluteDeviation(DoubleStream s) { From 264c5710b963928709fffcdb5563080351f35e9a Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 10 Oct 2023 08:29:12 +0200 Subject: [PATCH 098/176] Stop using RandomIndexWriter in IdLoaderTests. (#100521) The `RandomIndexWriter` is a wrapper around IndexWriter that randomly does additional operations to improve test coverage, for example random merging and flushing. However, the `testSynthesizeIdMultipleSegments()` test can't handle random flushes. This change alters the test case to use IndexWriter instead.
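For illustration, a self-contained Lucene sketch (not the IdLoaderTests code; assumes only lucene-core on the classpath) of why this helps: with a plain IndexWriter, segment boundaries fall exactly where the test flushes, instead of wherever RandomIndexWriter randomly decides to flush:

```
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.ByteBuffersDirectory;

// Sketch only: demonstrates deterministic segment boundaries with a plain IndexWriter.
public class DeterministicSegmentsSketch {
    public static void main(String[] args) throws Exception {
        IndexWriterConfig config = new IndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
        try (ByteBuffersDirectory dir = new ByteBuffersDirectory(); IndexWriter writer = new IndexWriter(dir, config)) {
            writer.addDocument(doc("1"));
            writer.flush(); // the first segment ends exactly here, never at a random point
            writer.addDocument(doc("2"));
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                System.out.println("segments: " + reader.leaves().size()); // always 2
            }
        }
    }

    private static Document doc(String id) {
        Document d = new Document();
        d.add(new StringField("id", id, Field.Store.NO));
        return d;
    }
}
```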
Closes #100246 --- .../index/mapper/IdLoaderTests.java | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java index 9b58eba01f5d3..e67873fe4c761 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java @@ -14,6 +14,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReader; @@ -23,7 +24,6 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.analysis.MockAnalyzer; -import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -69,7 +69,6 @@ public void testSynthesizeIdSimple() throws Exception { prepareIndexReader(indexAndForceMerge(routing, docs), verify, false); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100246") public void testSynthesizeIdMultipleSegments() throws Exception { var routingPaths = List.of("dim1"); var routing = createRouting(routingPaths); @@ -94,7 +93,7 @@ public void testSynthesizeIdMultipleSegments() throws Exception { new Doc(startTime - 2, List.of(new Dimension("dim1", "bbb"), new Dimension("dim2", "yyy"))), new Doc(startTime - 3, List.of(new Dimension("dim1", "bbb"), new Dimension("dim2", "yyy"))) ); - CheckedConsumer buildIndex = writer -> { + CheckedConsumer buildIndex = writer -> { for (Doc doc : docs1) { indexDoc(routing, writer, doc); } @@ -185,10 +184,7 @@ public void testSynthesizeIdRandom() throws Exception { assertThat(expectedIDs, empty()); } - private static CheckedConsumer indexAndForceMerge( - IndexRouting.ExtractFromSource routing, - List docs - ) { + private static CheckedConsumer indexAndForceMerge(IndexRouting.ExtractFromSource routing, List docs) { return writer -> { for (Doc doc : docs) { indexDoc(routing, writer, doc); @@ -198,7 +194,7 @@ private static CheckedConsumer indexAndForceMerg } private void prepareIndexReader( - CheckedConsumer buildIndex, + CheckedConsumer buildIndex, CheckedConsumer verify, boolean noMergePolicy ) throws IOException { @@ -212,7 +208,7 @@ private void prepareIndexReader( new SortedNumericSortField(DataStreamTimestampFieldMapper.DEFAULT_PATH, SortField.Type.LONG, true) ); config.setIndexSort(sort); - RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config); + IndexWriter indexWriter = new IndexWriter(directory, config); buildIndex.accept(indexWriter); indexWriter.close(); @@ -222,7 +218,7 @@ private void prepareIndexReader( } } - private static void indexDoc(IndexRouting.ExtractFromSource routing, RandomIndexWriter iw, Doc doc) throws IOException { + private static void indexDoc(IndexRouting.ExtractFromSource routing, IndexWriter iw, Doc doc) throws IOException { final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routing.builder()); final List fields = new ArrayList<>(); From b31715db0113a3648d4eff0547942cb17ac28b03 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Tue, 10 Oct 2023 09:05:19 +0200 Subject: [PATCH 099/176] Moving ClusterStatsMonitoringDoc to opaque string versions (#100328) Moving ClusterStatsMonitoringDoc to Build version info + ClusterStatsNodes to String version --- .../admin/cluster/stats/ClusterStatsIT.java | 26 +++++++++++-------- .../cluster/stats/ClusterStatsNodes.java | 11 ++++---- .../cluster/ClusterStatsCollector.java | 4 +-- .../cluster/ClusterStatsCollectorTests.java | 4 +-- .../ClusterStatsMonitoringDocTests.java | 3 +-- 5 files changed, 25 insertions(+), 23 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index dbd9cb7e45f6b..902f74ef778a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -41,7 +41,10 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) @@ -201,22 +204,23 @@ public void testValuesSmokeScreen() throws IOException, ExecutionException, Inte ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); String msg = response.toString(); - assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000L)); // 1 Jan 2000 - assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0L)); + assertThat(msg, response.getTimestamp(), greaterThan(946681200000L)); // 1 Jan 2000 + assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), greaterThan(0L)); - assertThat(msg, response.nodesStats.getFs().getTotal().getBytes(), Matchers.greaterThan(0L)); - assertThat(msg, response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0)); + assertThat(msg, response.nodesStats.getFs().getTotal().getBytes(), greaterThan(0L)); + assertThat(msg, response.nodesStats.getJvm().getVersions().size(), greaterThan(0)); - assertThat(msg, response.nodesStats.getVersions().size(), Matchers.greaterThan(0)); - assertThat(msg, response.nodesStats.getVersions().contains(Version.CURRENT), Matchers.equalTo(true)); - assertThat(msg, response.nodesStats.getPlugins().size(), Matchers.greaterThanOrEqualTo(0)); + assertThat(msg, response.nodesStats.getVersions(), hasSize(greaterThan(0))); + // TODO: Build.current().unqualifiedVersion() -- or Build.current().version() if/when we move NodeInfo to Build version(s) + assertThat(msg, response.nodesStats.getVersions(), hasItem(Version.CURRENT.toString())); + assertThat(msg, response.nodesStats.getPlugins(), hasSize(greaterThanOrEqualTo(0))); - assertThat(msg, response.nodesStats.getProcess().count, Matchers.greaterThan(0)); + assertThat(msg, response.nodesStats.getProcess().count, greaterThan(0)); // 0 happens when not supported on platform - assertThat(msg, response.nodesStats.getProcess().getAvgOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(0L)); + assertThat(msg, response.nodesStats.getProcess().getAvgOpenFileDescriptors(), greaterThanOrEqualTo(0L)); // these can be -1 if not supported on platform - 
assertThat(msg, response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L)); - assertThat(msg, response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L)); + assertThat(msg, response.nodesStats.getProcess().getMinOpenFileDescriptors(), greaterThanOrEqualTo(-1L)); + assertThat(msg, response.nodesStats.getProcess().getMaxOpenFileDescriptors(), greaterThanOrEqualTo(-1L)); NodesStatsResponse nodesStatsResponse = clusterAdmin().prepareNodesStats().setOs(true).get(); long total = 0; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 5f94dbf9d76a2..225f4226a5db8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -48,7 +47,7 @@ public class ClusterStatsNodes implements ToXContentFragment { private final Counts counts; - private final Set versions; + private final Set versions; private final OsStats os; private final ProcessStats process; private final JvmStats jvm; @@ -71,7 +70,7 @@ public class ClusterStatsNodes implements ToXContentFragment { for (ClusterStatsNodeResponse nodeResponse : nodeResponses) { nodeInfos.add(nodeResponse.nodeInfo()); nodeStats.add(nodeResponse.nodeStats()); - this.versions.add(nodeResponse.nodeInfo().getVersion()); + this.versions.add(nodeResponse.nodeInfo().getVersion().toString()); this.plugins.addAll(nodeResponse.nodeInfo().getInfo(PluginsAndModules.class).getPluginInfos()); TransportAddress publishAddress = nodeResponse.nodeInfo().getInfo(TransportInfo.class).address().publishAddress(); @@ -95,7 +94,7 @@ public Counts getCounts() { return this.counts; } - public Set getVersions() { + public Set getVersions() { return versions; } @@ -137,8 +136,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); builder.startArray(Fields.VERSIONS); - for (Version v : versions) { - builder.value(v.toString()); + for (var v : versions) { + builder.value(v); } builder.endArray(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java index 4d8d2ce45fb48..1d84235c3deeb 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollector.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.monitoring.collector.cluster; import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.Client; @@ -92,7 +92,7 @@ 
protected Collection doCollect(final MonitoringDoc.Node node, fin final String clusterName = clusterService.getClusterName().value(); final String clusterUuid = clusterUuid(clusterState); - final String version = Version.CURRENT.toString(); + final String version = Build.current().version(); final License license = licenseService.getLicense(); final List xpackUsage = collect(usageSupplier); final boolean apmIndicesExist = doAPMIndicesExist(clusterState); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java index cbc370496fbd7..504dbc65eac2a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java @@ -6,8 +6,8 @@ */ package org.elasticsearch.xpack.monitoring.collector.cluster; +import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsIndices; @@ -266,7 +266,7 @@ public void testDoCollect() throws Exception { assertThat(document.getId(), nullValue()); assertThat(document.getClusterName(), equalTo(clusterName)); - assertThat(document.getVersion(), equalTo(Version.CURRENT.toString())); + assertThat(document.getVersion(), equalTo(Build.current().version())); assertThat(document.getLicense(), equalTo(license)); assertThat(document.getStatus(), equalTo(clusterStatus)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 0d554eb1e8bf5..7480666c408e6 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -56,7 +56,6 @@ import org.elasticsearch.plugins.PluginDescriptor; import org.elasticsearch.plugins.PluginRuntimeInfo; import org.elasticsearch.test.BuildUtils; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.transport.TransportInfo; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.XPackFeatureSet; @@ -102,7 +101,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase Date: Tue, 10 Oct 2023 10:47:35 +0200 Subject: [PATCH 100/176] [DOCS] Refresh "Search your data" (#99482) * Restructure existing docs * Add draft content * Changes for MVP * Reword * Move Search Applications docs to ES reference - Renamed files and changed ids per https://github.com/elastic/elasticsearch/pull/100032 - Updated URL syntax for absolute URLs using attribute - Deleted redirects in redirects.asciidoc * Fix json source formatting * Use `source, js`, not `javascript` * Idem * Fix console-reponse * Skip tests for js blocks * This will definitely fix things * Use attributes * Remove commented out redirects * Fix header level in search-with-synonyms.asciidoc * Update 
docs/reference/search/search-your-data/knn-search.asciidoc Co-authored-by: Chris Cressman * Fix trailing comma bug Flagged in #enterprise-search Slack * Move semantic search under vector search --------- Co-authored-by: Liam Thompson Co-authored-by: Chris Cressman Co-authored-by: Liam Thompson <32779855+leemthompo@users.noreply.github.com> --- docs/reference/redirects.asciidoc | 20 - .../collapse-search-results.asciidoc | 10 +- .../filter-search-results.asciidoc | 10 +- .../search-your-data/highlighting.asciidoc | 42 +- .../long-running-searches.asciidoc | 2 +- .../search-your-data/near-real-time.asciidoc | 2 +- .../paginate-search-results.asciidoc | 2 +- .../retrieve-inner-hits.asciidoc | 12 +- .../retrieve-selected-fields.asciidoc | 24 +- .../search-your-data/search-api.asciidoc | 521 +++++++++++ .../search-application-api.asciidoc | 651 ++++++++++++++ .../search-application-client.asciidoc | 808 ++++++++++++++++++ .../search-application-overview.asciidoc | 132 +++ .../search-application-security.asciidoc | 241 ++++++ .../search-multiple-indices.asciidoc | 4 +- .../search-shard-routing.asciidoc | 10 +- .../search-your-data/search-template.asciidoc | 41 +- .../search-using-query-rules.asciidoc | 14 +- .../search-with-synonyms.asciidoc | 2 +- .../search-your-data.asciidoc | 538 +----------- .../sort-search-results.asciidoc | 38 +- 21 files changed, 2486 insertions(+), 638 deletions(-) create mode 100644 docs/reference/search/search-your-data/search-api.asciidoc create mode 100644 docs/reference/search/search-your-data/search-application-api.asciidoc create mode 100644 docs/reference/search/search-your-data/search-application-client.asciidoc create mode 100644 docs/reference/search/search-your-data/search-application-overview.asciidoc create mode 100644 docs/reference/search/search-your-data/search-application-security.asciidoc diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 4ec8c203bbef9..702e7dbc8c60e 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1982,23 +1982,3 @@ coming::[8.11.0] === Set up CORS for Behavioral Analytics coming::[8.11.0] - -[role="exclude",id="search-application-overview"] -=== Elastic Search Applications - -coming::[8.11.0] - -[role="exclude",id="search-application-api"] -=== Search Applications search API and templates - -coming::[8.11.0] - -[role="exclude",id="search-application-client"] -=== Search Applications client - -coming::[8.11.0] - -[role="exclude",id="search-application-security"] -=== Search Applications security - -coming::[8.11.0] diff --git a/docs/reference/search/search-your-data/collapse-search-results.asciidoc b/docs/reference/search/search-your-data/collapse-search-results.asciidoc index 1c2a190a779cd..ffb6238c89e10 100644 --- a/docs/reference/search/search-your-data/collapse-search-results.asciidoc +++ b/docs/reference/search/search-your-data/collapse-search-results.asciidoc @@ -1,5 +1,5 @@ [[collapse-search-results]] -== Collapse search results +=== Collapse search results You can use the `collapse` parameter to collapse search results based on field values. The collapsing is done by selecting only the top sorted @@ -45,7 +45,7 @@ NOTE: Collapsing is applied to the top hits only and does not affect aggregation [discrete] [[expand-collapse-results]] -=== Expand collapse results +==== Expand collapse results It is also possible to expand each collapsed top hits with the `inner_hits` option. 
@@ -150,7 +150,7 @@ WARNING: `collapse` cannot be used in conjunction with <> parameter. Using `search_after` is only supported when sorting and collapsing on the same field. Secondary sorts are also not allowed. For example, we can @@ -177,7 +177,7 @@ GET /my-index-000001/_search [discrete] [[second-level-of-collapsing]] -=== Second level of collapsing +==== Second level of collapsing A second level of collapsing is also supported and is applied to `inner_hits`. @@ -313,7 +313,7 @@ GET /my-index-000001/_search ---- [discrete] -=== Track Scores +==== Track Scores When `collapse` is used with `sort` on a field, scores are not computed. Setting `track_scores` to true instructs {es} to compute and track scores. diff --git a/docs/reference/search/search-your-data/filter-search-results.asciidoc b/docs/reference/search/search-your-data/filter-search-results.asciidoc index abc2749cb9b1b..024949cf19ea1 100644 --- a/docs/reference/search/search-your-data/filter-search-results.asciidoc +++ b/docs/reference/search/search-your-data/filter-search-results.asciidoc @@ -1,5 +1,5 @@ [[filter-search-results]] -== Filter search results +=== Filter search results You can use two methods to filter search results: @@ -17,7 +17,7 @@ improve relevance and reorder results. [discrete] [[post-filter]] -=== Post filter +==== Post filter When you use the `post_filter` parameter to filter search results, the search hits are filtered after the aggregations are calculated. A post filter has no @@ -149,7 +149,7 @@ GET /shirts/_search [discrete] [[rescore]] -=== Rescore filtered search results +==== Rescore filtered search results Rescoring can help to improve precision by reordering just the top (eg 100 - 500) documents returned by the @@ -175,7 +175,7 @@ confusingly shift as the user steps through pages. [discrete] [[query-rescorer]] -==== Query rescorer +===== Query rescorer The query rescorer executes a second query only on the Top-K results returned by the <> and @@ -236,7 +236,7 @@ for <> rescores. [discrete] [[multiple-rescores]] -==== Multiple rescores +===== Multiple rescores It is also possible to execute multiple rescores in sequence: diff --git a/docs/reference/search/search-your-data/highlighting.asciidoc b/docs/reference/search/search-your-data/highlighting.asciidoc index 6954f91b99beb..55e737eb00197 100644 --- a/docs/reference/search/search-your-data/highlighting.asciidoc +++ b/docs/reference/search/search-your-data/highlighting.asciidoc @@ -1,5 +1,5 @@ [[highlighting]] -== Highlighting +=== Highlighting Highlighters enable you to get highlighted snippets from one or more fields in your search results so you can show users where the query matches are. @@ -42,7 +42,7 @@ for each field. [discrete] [[unified-highlighter]] -=== Unified highlighter +==== Unified highlighter The `unified` highlighter uses the Lucene Unified Highlighter. This highlighter breaks the text into sentences and uses the BM25 algorithm to score individual sentences as if they were documents in the corpus. It also supports @@ -51,7 +51,7 @@ default highlighter. [discrete] [[plain-highlighter]] -=== Plain highlighter +==== Plain highlighter The `plain` highlighter uses the standard Lucene highlighter. It attempts to reflect the query matching logic in terms of understanding word importance and any word positioning criteria in phrase queries. 
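+A short sketch, again assuming the `my-index-000001` example index, that forces the `plain` highlighter for a single field via its `type` option:
+
+[source,console]
+----
+GET /my-index-000001/_search
+{
+  "query": { "match": { "message": "search" } },
+  "highlight": {
+    "fields": {
+      "message": { "type": "plain" }
+    }
+  }
+}
+----
+// TEST[setup:my_index]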
@@ -67,7 +67,7 @@ queries, we recommend using the `unified` highlighter on `postings` or `term_vec [discrete] [[fast-vector-highlighter]] -=== Fast vector highlighter +==== Fast vector highlighter The `fvh` highlighter uses the Lucene Fast Vector highlighter. This highlighter can be used on fields with `term_vector` set to `with_positions_offsets` in the mapping. The fast vector highlighter: @@ -87,7 +87,7 @@ span queries, try an alternative highlighter, such as the `unified` highlighter. [discrete] [[offsets-strategy]] -=== Offsets strategy +==== Offsets strategy To create meaningful search snippets from the terms being queried, the highlighter needs to know the start and end character offsets of each word in the original text. These offsets can be obtained from: @@ -121,7 +121,7 @@ for a particular index with the index setting <> * <> @@ -313,7 +313,7 @@ GET /_search [discrete] [[specify-highlight-query]] -== Specify a highlight query +=== Specify a highlight query You can specify a `highlight_query` to take additional information into account when highlighting. For example, the following query includes both the search @@ -382,7 +382,7 @@ GET /_search [discrete] [[set-highlighter-type]] -== Set highlighter type +=== Set highlighter type The `type` field allows to force a specific highlighter type. The allowed values are: `unified`, `plain` and `fvh`. @@ -406,7 +406,7 @@ GET /_search [[configure-tags]] [discrete] -== Configure highlighting tags +=== Configure highlighting tags By default, the highlighting will wrap highlighted text in `` and ``. This can be controlled by setting `pre_tags` and `post_tags`, @@ -472,7 +472,7 @@ GET /_search [[highlight-all]] [discrete] -== Highlight in all fields +=== Highlight in all fields By default, only fields that contains a query match are highlighted. Set `require_field_match` to `false` to highlight all fields. @@ -496,7 +496,7 @@ GET /_search [[matched-fields]] [discrete] -== Combine matches on multiple fields +=== Combine matches on multiple fields WARNING: This is only supported by the `fvh` highlighter @@ -630,7 +630,7 @@ to [[explicit-field-order]] [discrete] -== Explicitly order highlighted fields +=== Explicitly order highlighted fields Elasticsearch highlights the fields in the order that they are sent, but per the JSON spec, objects are unordered. If you need to be explicit about the order in which fields are highlighted specify the `fields` as an array: @@ -657,7 +657,7 @@ fields are highlighted but a plugin might. [discrete] [[control-highlighted-frags]] -== Control highlighted fragments +=== Control highlighted fragments Each field highlighted can control the size of the highlighted fragment in characters (defaults to `100`), and the maximum number of fragments @@ -754,7 +754,7 @@ GET /_search [discrete] [[highlight-postings-list]] -== Highlight using the postings list +=== Highlight using the postings list Here is an example of setting the `comment` field in the index mapping to allow for highlighting using the postings: @@ -794,7 +794,7 @@ PUT /example [discrete] [[specify-fragmenter]] -== Specify a fragmenter for the plain highlighter +=== Specify a fragmenter for the plain highlighter When using the `plain` highlighter, you can choose between the `simple` and `span` fragmenters: @@ -915,7 +915,7 @@ This is useful for highlighting the entire contents of a document or field. 
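+A hedged sketch of that whole-field behavior, assuming the `my-index-000001` example index: setting `number_of_fragments` to `0` returns the entire highlighted field contents instead of fragments.
+
+[source,console]
+----
+GET /my-index-000001/_search
+{
+  "query": { "match": { "user.id": "kimchy" } },
+  "highlight": {
+    "fields": {
+      "message": { "number_of_fragments": 0 }
+    }
+  }
+}
+----
+// TEST[setup:my_index]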
[discrete] [[how-es-highlighters-work-internally]] -== How highlighters work internally +=== How highlighters work internally Given a query and a text (the content of a document field), the goal of a highlighter is to find the best text fragments for the query, and highlight @@ -927,7 +927,7 @@ address several questions: - How to highlight the query terms in a fragment? [discrete] -=== How to break a text into fragments? +==== How to break a text into fragments? Relevant settings: `fragment_size`, `fragmenter`, `type` of highlighter, `boundary_chars`, `boundary_max_scan`, `boundary_scanner`, `boundary_scanner_locale`. @@ -945,7 +945,7 @@ fragments by utilizing Java's `BreakIterator`. This ensures that a fragment is a valid sentence as long as `fragment_size` allows for this. [discrete] -=== How to find the best fragments? +==== How to find the best fragments? Relevant settings: `number_of_fragments`. To find the best, most relevant, fragments, a highlighter needs to score @@ -978,7 +978,7 @@ an in-memory index from the text. Unified highlighter uses the BM25 scoring mode to score fragments. [discrete] -=== How to highlight the query terms in a fragment? +==== How to highlight the query terms in a fragment? Relevant settings: `pre-tags`, `post-tags`. The goal is to highlight only those terms that participated in generating the 'hit' on the document. @@ -995,7 +995,7 @@ fragments in some raw form, and then populate them with actual text. A highlighter uses `pre-tags`, `post-tags` to encode highlighted terms. [discrete] -=== An example of the work of the unified highlighter +==== An example of the work of the unified highlighter Let's look in more details how unified highlighter works. diff --git a/docs/reference/search/search-your-data/long-running-searches.asciidoc b/docs/reference/search/search-your-data/long-running-searches.asciidoc index 3f42bbfbf39e3..5f779633a0fa7 100644 --- a/docs/reference/search/search-your-data/long-running-searches.asciidoc +++ b/docs/reference/search/search-your-data/long-running-searches.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [[async-search-intro]] -== Long-running searches +=== Long-running searches {es} generally allows you to quickly search across big amounts of data. There are situations where a search executes on many shards, possibly against diff --git a/docs/reference/search/search-your-data/near-real-time.asciidoc b/docs/reference/search/search-your-data/near-real-time.asciidoc index d7bb3d20f905d..46a996c237c38 100644 --- a/docs/reference/search/search-your-data/near-real-time.asciidoc +++ b/docs/reference/search/search-your-data/near-real-time.asciidoc @@ -1,7 +1,7 @@ :xrefstyle: short [[near-real-time]] -== Near real-time search +=== Near real-time search The overview of <> indicates that when a document is stored in {es}, it is indexed and fully searchable in _near real-time_--within 1 second. What defines near real-time search? Lucene, the Java libraries on which {es} is based, introduced the concept of per-segment search. A _segment_ is similar to an inverted index, but the word _index_ in Lucene means "a collection of segments plus a commit point". After a commit, a new segment is added to the commit point and the buffer is cleared. 
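+As a concrete illustration of this refresh behavior (a sketch, assuming the `my-index-000001` example index): a write becomes searchable after the next refresh, which you can either wait for or trigger explicitly.
+
+[source,console]
+----
+PUT /my-index-000001/_doc/1?refresh=wait_for <1>
+{
+  "user": { "id": "kimchy" }
+}
+
+POST /my-index-000001/_refresh <2>
+----
+// TEST[setup:my_index]
+
+<1> Blocks until the change is visible to search.
+<2> Forces a refresh immediately.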
diff --git a/docs/reference/search/search-your-data/paginate-search-results.asciidoc b/docs/reference/search/search-your-data/paginate-search-results.asciidoc index 1f357f6fe500b..a81598273dfd3 100644 --- a/docs/reference/search/search-your-data/paginate-search-results.asciidoc +++ b/docs/reference/search/search-your-data/paginate-search-results.asciidoc @@ -1,5 +1,5 @@ [[paginate-search-results]] -== Paginate search results +=== Paginate search results By default, searches return the top 10 matching hits. To page through a larger set of results, you can use the <>'s `from` and `size` diff --git a/docs/reference/search/search-your-data/retrieve-inner-hits.asciidoc b/docs/reference/search/search-your-data/retrieve-inner-hits.asciidoc index 7daf8afcf1c01..89ab858f89edc 100644 --- a/docs/reference/search/search-your-data/retrieve-inner-hits.asciidoc +++ b/docs/reference/search/search-your-data/retrieve-inner-hits.asciidoc @@ -1,5 +1,5 @@ [[inner-hits]] -== Retrieve inner hits +=== Retrieve inner hits The <> and <> features allow the return of documents that have matches in a different scope. In the parent/child case, parent documents are returned based on matches in child @@ -56,7 +56,7 @@ If `inner_hits` is defined on a query that supports it then each search hit will [discrete] [[inner-hits-options]] -=== Options +==== Options Inner hits support the following options: @@ -82,7 +82,7 @@ Inner hits also supports the following per document features: [discrete] [[nested-inner-hits]] -=== Nested inner hits +==== Nested inner hits The nested `inner_hits` can be used to include nested inner objects as inner hits to a search hit. @@ -199,7 +199,7 @@ document that contained the comment. [discrete] [[nested-inner-hits-source]] -==== Nested inner hits and +_source+ +===== Nested inner hits and +_source+ Nested document don't have a `_source` field, because the entire source of document is stored with the root document under its `_source` field. To include the source of just the nested document, the source of the root document is parsed and just @@ -314,7 +314,7 @@ Response not included in text but tested for completeness sake. [discrete] [[hierarchical-nested-inner-hits]] -=== Hierarchical levels of nested object fields and inner hits. +==== Hierarchical levels of nested object fields and inner hits. If a mapping has multiple levels of hierarchical nested object fields each level can be accessed via dot notated path. For example if there is a `comments` nested field that contains a `votes` nested field and votes should directly be returned @@ -434,7 +434,7 @@ This indirect referencing is only supported for nested inner hits. [discrete] [[parent-child-inner-hits]] -=== Parent/child inner hits +==== Parent/child inner hits The parent/child `inner_hits` can be used to include parent or child: diff --git a/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc b/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc index 140aeb39723f6..41f54f7db7a77 100644 --- a/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc +++ b/docs/reference/search/search-your-data/retrieve-selected-fields.asciidoc @@ -1,5 +1,5 @@ [[search-fields]] -== Retrieve selected fields from a search +=== Retrieve selected fields from a search ++++ Retrieve selected fields ++++ @@ -20,7 +20,7 @@ retrieving data. [discrete] [[search-fields-param]] -=== The `fields` option +==== The `fields` option To retrieve specific fields in the search response, use the `fields` parameter. 
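+For example, a compact sketch (assuming the `my-index-000001` example index; a fuller version of this request is shown below) that retrieves two concrete fields plus a wildcard pattern while suppressing the full `_source`:
+
+[source,console]
+----
+POST /my-index-000001/_search
+{
+  "query": { "match": { "user.id": "kimchy" } },
+  "fields": [ "user.id", "http.response.*", "@timestamp" ],
+  "_source": false
+}
+----
+// TEST[setup:my_index]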
// tag::fields-param-desc[] Because it consults the index mappings, the `fields` parameter provides several @@ -46,7 +46,7 @@ Selected fields that can't be found in `_source` are skipped. [discrete] [[search-fields-request]] -==== Retrieve specific fields +===== Retrieve specific fields The following search request uses the `fields` parameter to retrieve values for the `user.id` field, all fields starting with `http.response.`, and the `@timestamp` field. @@ -89,7 +89,7 @@ However, when explicitly requested using the field name, the `_id`, `_routing`, [discrete] [[search-fields-response]] -==== Response always returns an array +===== Response always returns an array The `fields` response always returns an array of values for each field, even when there is a single value in the `_source`. This is because {es} has @@ -140,7 +140,7 @@ fields are returned. [discrete] [[search-fields-nested]] -==== Retrieve nested fields +===== Retrieve nested fields [%collapsible] ==== @@ -300,7 +300,7 @@ values will be returned because the pattern doesn't match any leaf fields. [discrete] [[retrieve-unmapped-fields]] -==== Retrieve unmapped fields +===== Retrieve unmapped fields [%collapsible] ==== @@ -392,7 +392,7 @@ won't be included in the response because `include_unmapped` isn't set to [discrete] [[ignored-field-values]] -==== Ignored field values +===== Ignored field values [%collapsible] ==== @@ -570,7 +570,7 @@ GET /_search [discrete] [[field-retrieval-methods]] -=== Other methods of retrieving data +==== Other methods of retrieving data .Using `fields` is typically better **** @@ -598,7 +598,7 @@ implications of source filtering where {es} needs to load and parse the entire [discrete] [[docvalue-fields]] -==== Doc value fields +===== Doc value fields You can use the <> parameter to return <> for one or more fields in the search response. @@ -652,7 +652,7 @@ property. [discrete] [[stored-fields]] -==== Stored fields +===== Stored fields It's also possible to store an individual field's values by using the <> mapping option. You can use the @@ -706,7 +706,7 @@ must be used within an <> block. [discrete] [[disable-stored-fields]] -===== Disable stored fields +====== Disable stored fields To disable the stored fields (and metadata fields) entirely use: `_none_`: @@ -725,7 +725,7 @@ NOTE: <> and <> (based on different fields) for each hit. For example: diff --git a/docs/reference/search/search-your-data/search-api.asciidoc b/docs/reference/search/search-your-data/search-api.asciidoc new file mode 100644 index 0000000000000..f3e271918b9b2 --- /dev/null +++ b/docs/reference/search/search-your-data/search-api.asciidoc @@ -0,0 +1,521 @@ +[[search-your-data]] +== The search API + +A _search_ consists of one or more queries that are combined and sent to {es}. +Documents that match a search's queries are returned in the _hits_, or +_search results_, of the response. + +A search may also contain additional information used to better process its +queries. For example, a search may be limited to a specific index or only return +a specific number of results. + +You can use the <> to search and +<> data stored in {es} data streams or indices. +The API's `query` request body parameter accepts queries written in +<>. + +[discrete] +[[run-an-es-search]] +=== Run a search + +The following request searches `my-index-000001` using a +<> query. This query matches documents with a +`user.id` value of `kimchy`. 
+ +[source,console] +---- +GET /my-index-000001/_search +{ + "query": { + "match": { + "user.id": "kimchy" + } + } +} +---- +// TEST[setup:my_index] + +The API response returns the top 10 documents matching the query in the +`hits.hits` property. + +[source,console-result] +---- +{ + "took": 5, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 1.3862942, + "hits": [ + { + "_index": "my-index-000001", + "_id": "kxWFcnMByiguvud1Z8vC", + "_score": 1.3862942, + "_source": { + "@timestamp": "2099-11-15T14:12:12", + "http": { + "request": { + "method": "get" + }, + "response": { + "bytes": 1070000, + "status_code": 200 + }, + "version": "1.1" + }, + "message": "GET /search HTTP/1.1 200 1070000", + "source": { + "ip": "127.0.0.1" + }, + "user": { + "id": "kimchy" + } + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took": 5/"took": "$body.took"/] +// TESTRESPONSE[s/"_id": "kxWFcnMByiguvud1Z8vC"/"_id": "$body.hits.hits.0._id"/] + +[discrete] +[[common-search-options]] +=== Common search options + +You can use the following options to customize your searches. + +*Query DSL* + +<> supports a variety of query types you can mix and match +to get the results you want. Query types include: + +* <> and other <>, which let you combine queries and match results based on multiple +criteria +* <> for filtering and finding exact matches +* <>, which are commonly used in search +engines +* <> and <> + +*Aggregations* + +You can use <> to get statistics and +other analytics for your search results. Aggregations help you answer questions +like: + +* What's the average response time for my servers? +* What are the top IP addresses hit by users on my network? +* What is the total transaction revenue by customer? + +*Search multiple data streams and indices* + +You can use comma-separated values and grep-like index patterns to search +several data streams and indices in the same request. You can even boost search +results from specific indices. See <>. + +*Paginate search results* + +By default, searches return only the top 10 matching hits. To retrieve +more or fewer documents, see <>. + +*Retrieve selected fields* + +The search response's `hits.hits` property includes the full document +<> for each hit. To retrieve only a subset of +the `_source` or other fields, see <>. + +*Sort search results* + +By default, search hits are sorted by `_score`, a <> that measures how well each document matches the query. To customize the +calculation of these scores, use the +<> query. To sort search hits by +other field values, see <>. + +*Run an async search* + +{es} searches are designed to run on large volumes of data quickly, often +returning results in milliseconds. For this reason, searches are +_synchronous_ by default. The search request waits for complete results before +returning a response. + +However, complete results can take longer for searches across +large data sets or <>. + +To avoid long waits, you can run an _asynchronous_, or _async_, search +instead. An <> lets you retrieve partial +results for a long-running search now and get complete results later. + +[discrete] +[[run-search-runtime-fields]] +=== Define fields that exist only in a query +Instead of indexing your data and then searching it, you can define +<> that only exist as part of your +search query. 
You specify a `runtime_mappings` section in your search request +to define the runtime field, which can optionally include a Painless script. + +For example, the following query defines a runtime field called `day_of_week`. +The included script calculates the day of the week based on the value of the +`@timestamp` field, and uses `emit` to return the calculated value. + +The query also includes a <> that operates on `day_of_week`. + +[source,console] +---- +GET /my-index-000001/_search +{ + "runtime_mappings": { + "day_of_week": { + "type": "keyword", + "script": { + "source": + """emit(doc['@timestamp'].value.dayOfWeekEnum + .getDisplayName(TextStyle.FULL, Locale.ROOT))""" + } + } + }, + "aggs": { + "day_of_week": { + "terms": { + "field": "day_of_week" + } + } + } +} +---- +// TEST[setup:my_index] + +The response includes an aggregation based on the `day_of_week` runtime field. +Under `buckets` is a `key` with a value of `Sunday`. The query dynamically +calculated this value based on the script defined in the `day_of_week` runtime +field without ever indexing the field. + +[source,console-result] +---- +{ + ... + *** + "aggregations" : { + "day_of_week" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ + { + "key" : "Sunday", + "doc_count" : 5 + } + ] + } + } +} +---- +// TESTRESPONSE[s/\.\.\./"took" : $body.took,"timed_out" : $body.timed_out,"_shards" : $body._shards,/] +// TESTRESPONSE[s/\*\*\*/"hits" : $body.hits,/] + +[discrete] +[[search-timeout]] +=== Search timeout + +By default, search requests don't time out. The request waits for complete +results from each shard before returning a response. + +While <> is designed for long-running +searches, you can also use the `timeout` parameter to specify a duration you'd +like to wait on each shard to complete. Each shard collects hits within the +specified time period. If collection isn't finished when the period ends, {es} +uses only the hits accumulated up to that point. The overall latency of a search +request depends on the number of shards needed for the search and the number of +concurrent shard requests. + +[source,console] +---- +GET /my-index-000001/_search +{ + "timeout": "2s", + "query": { + "match": { + "user.id": "kimchy" + } + } +} +---- +// TEST[setup:my_index] + +To set a cluster-wide default timeout for all search requests, configure +`search.default_search_timeout` using the <>. This global timeout duration is used if no `timeout` argument is +passed in the request. If the global search timeout expires before the search +request finishes, the request is cancelled using <>. The `search.default_search_timeout` setting defaults to `-1` (no +timeout). + +[discrete] +[[global-search-cancellation]] +=== Search cancellation + +You can cancel a search request using the <>. {es} also automatically cancels a search request when your client's HTTP +connection closes. We recommend you set up your client to close HTTP connections +when a search request is aborted or times out. + +[discrete] +[[track-total-hits]] +=== Track total hits + +Generally the total hit count can't be computed accurately without visiting all +matches, which is costly for queries that match lots of documents. The +`track_total_hits` parameter allows you to control how the total number of hits +should be tracked. +Given that it is often enough to have a lower bound of the number of hits, +such as "there are at least 10000 hits", the default is set to `10,000`. 
+This means that requests will count the total hit accurately up to `10,000` hits. +It is a good trade off to speed up searches if you don't need the accurate number +of hits after a certain threshold. + +When set to `true` the search response will always track the number of hits that +match the query accurately (e.g. `total.relation` will always be equal to `"eq"` +when `track_total_hits` is set to true). Otherwise the `"total.relation"` returned +in the `"total"` object in the search response determines how the `"total.value"` +should be interpreted. A value of `"gte"` means that the `"total.value"` is a +lower bound of the total hits that match the query and a value of `"eq"` indicates +that `"total.value"` is the accurate count. + +[source,console] +-------------------------------------------------- +GET my-index-000001/_search +{ + "track_total_hits": true, + "query": { + "match" : { + "user.id" : "elkbee" + } + } +} +-------------------------------------------------- +// TEST[setup:my_index] + +\... returns: + +[source,console-result] +-------------------------------------------------- +{ + "_shards": ... + "timed_out": false, + "took": 100, + "hits": { + "max_score": 1.0, + "total" : { + "value": 2048, <1> + "relation": "eq" <2> + }, + "hits": ... + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] +// TESTRESPONSE[s/"took": 100/"took": $body.took/] +// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] +// TESTRESPONSE[s/"value": 2048/"value": $body.hits.total.value/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] + +<1> The total number of hits that match the query. +<2> The count is accurate (e.g. `"eq"` means equals). + +It is also possible to set `track_total_hits` to an integer. +For instance the following query will accurately track the total hit count that match +the query up to 100 documents: + +[source,console] +-------------------------------------------------- +GET my-index-000001/_search +{ + "track_total_hits": 100, + "query": { + "match": { + "user.id": "elkbee" + } + } +} +-------------------------------------------------- +// TEST[continued] + +The `hits.total.relation` in the response will indicate if the +value returned in `hits.total.value` is accurate (`"eq"`) or a lower +bound of the total (`"gte"`). + +For instance the following response: + +[source,console-result] +-------------------------------------------------- +{ + "_shards": ... + "timed_out": false, + "took": 30, + "hits": { + "max_score": 1.0, + "total": { + "value": 42, <1> + "relation": "eq" <2> + }, + "hits": ... + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] +// TESTRESPONSE[s/"took": 30/"took": $body.took/] +// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] +// TESTRESPONSE[s/"value": 42/"value": $body.hits.total.value/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] + +<1> 42 documents match the query +<2> and the count is accurate (`"eq"`) + +\... indicates that the number of hits returned in the `total` +is accurate. + +If the total number of hits that match the query is greater than the +value set in `track_total_hits`, the total hits in the response +will indicate that the returned value is a lower bound: + +[source,console-result] +-------------------------------------------------- +{ + "_shards": ... 
+ "hits": { + "max_score": 1.0, + "total": { + "value": 100, <1> + "relation": "gte" <2> + }, + "hits": ... + } +} +-------------------------------------------------- +// TESTRESPONSE[skip:response is already tested in the previous snippet] + +<1> There are at least 100 documents that match the query +<2> This is a lower bound (`"gte"`). + +If you don't need to track the total number of hits at all you can improve query +times by setting this option to `false`: + +[source,console] +-------------------------------------------------- +GET my-index-000001/_search +{ + "track_total_hits": false, + "query": { + "match": { + "user.id": "elkbee" + } + } +} +-------------------------------------------------- +// TEST[continued] + +\... returns: + +[source,console-result] +-------------------------------------------------- +{ + "_shards": ... + "timed_out": false, + "took": 10, + "hits": { <1> + "max_score": 1.0, + "hits": ... + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] +// TESTRESPONSE[s/"took": 10/"took": $body.took/] +// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] + +<1> The total number of hits is unknown. + +Finally you can force an accurate count by setting `"track_total_hits"` +to `true` in the request. + +[discrete] +[[quickly-check-for-matching-docs]] +=== Quickly check for matching docs + +If you only want to know if there are any documents matching a +specific query, you can set the `size` to `0` to indicate that we are not +interested in the search results. You can also set `terminate_after` to `1` +to indicate that the query execution can be terminated whenever the first +matching document was found (per shard). + +[source,console] +-------------------------------------------------- +GET /_search?q=user.id:elkbee&size=0&terminate_after=1 +-------------------------------------------------- +// TEST[setup:my_index] + +NOTE: `terminate_after` is always applied **after** the +<> and stops the query as well as the aggregation +executions when enough hits have been collected on the shard. Though the doc +count on aggregations may not reflect the `hits.total` in the response since +aggregations are applied **before** the post filtering. + +The response will not contain any hits as the `size` was set to `0`. The +`hits.total` will be either equal to `0`, indicating that there were no +matching documents, or greater than `0` meaning that there were at least +as many documents matching the query when it was early terminated. +Also if the query was terminated early, the `terminated_early` flag will +be set to `true` in the response. Some queries are able to retrieve the hits +count directly from the index statistics, which is much faster as it does +not require executing the query. In those situations, no documents are +collected, the returned `total.hits` will be higher than `terminate_after`, +and `terminated_early` will be set to `false`. 
+ +[source,console-result] +-------------------------------------------------- +{ + "took": 3, + "timed_out": false, + "terminated_early": true, + "_shards": { + "total": 1, + "successful": 1, + "skipped" : 0, + "failed": 0 + }, + "hits": { + "total" : { + "value": 1, + "relation": "eq" + }, + "max_score": null, + "hits": [] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 3/"took": $body.took/] +// TESTRESPONSE[s/"terminated_early": true/"terminated_early": $body.terminated_early/] +// TESTRESPONSE[s/"value": 1/"value": $body.hits.total.value/] + +The `took` time in the response contains the milliseconds that this request +took for processing, beginning quickly after the node received the query, up +until all search related work is done and before the above JSON is returned +to the client. This means it includes the time spent waiting in thread pools, +executing a distributed search across the whole cluster and gathering all the +results. + +include::sort-search-results.asciidoc[] +include::paginate-search-results.asciidoc[] +include::retrieve-selected-fields.asciidoc[] +include::search-multiple-indices.asciidoc[] +include::collapse-search-results.asciidoc[] +include::filter-search-results.asciidoc[] +include::highlighting.asciidoc[] +include::long-running-searches.asciidoc[] +include::near-real-time.asciidoc[] +include::retrieve-inner-hits.asciidoc[] +include::search-shard-routing.asciidoc[] +include::search-using-query-rules.asciidoc[] +include::search-template.asciidoc[] diff --git a/docs/reference/search/search-your-data/search-application-api.asciidoc b/docs/reference/search/search-your-data/search-application-api.asciidoc new file mode 100644 index 0000000000000..29624a5bcd83a --- /dev/null +++ b/docs/reference/search/search-your-data/search-application-api.asciidoc @@ -0,0 +1,651 @@ +[[search-application-api]] +=== Search Applications search API and templates + +++++ +Search API and templates +++++ + +Your <> use <> to perform searches. +Templates help reduce complexity by exposing only template parameters, while using the full power of {es}'s query DSL to formulate queries. +Templates may be set when creating or updating a search application, and can be customized. +This template can be edited or updated at any time using the <> API call. + +In a nutshell, you create search templates with parameters instead of specific hardcoded search values. +At search time, you pass in the actual values for these parameters, enabling customized searches without rewriting the entire query structure. +Search Application templates: + +* Simplify query requests +* Reduce request size +* Ensure security and performance, as the query is predefined and can't be changed arbitrarily + +This document provides some sample templates to get you started using <> for additional use cases. +These templates are designed to be easily modified to meet your needs. +Once you've created a search application with a template, you can search your search application using this template. + +[TIP] +==== +Search templates use the https://mustache.github.io/[Mustache] templating language. +Mustache variables are typically enclosed in double curly brackets like this: `{{my-var}}`. + +Learn more by reading about <>. +==== + +[discrete] +[[search-application-api-default-template]] +==== Default template example + +If no template is stored with a search application, a minimal <> will be applied at search time. +The default template implements a simple search use case. 
+You can check your query parameters against the current template using the <> API call. + +The default template is very minimal: + +[source,js] +---- +{ + "template": { + "script": { + "source": { + "query": { + "query_string": { + "query": "{{query_string}}", + "default_field": "{{default_field}}" + } + } + }, + "params": { + "query_string": "*", + "default_field": "*" + } + } + } +} +---- +// NOTCONSOLE + +This may be useful for initial exploration of search templates, but you'll likely want to update this. + +Here are some things to note about this default template: + +* A call to `/_application/search_application/` with no parameters will return all results, in a similar manner to a parameterless call to `/_search`. +* Searching with the `query_string` and/or `default_field` parameters will perform a <> query. +* This template does not support additional parameters, including `from`, `size` or `boost`. + +[WARNING] +==== +This template is subject to change in future versions of the Search Applications feature. +==== + +Try some of the other examples in this document to experiment with specific use cases, or try creating your own! + +[discrete] +[[search-application-api-searching]] +==== Searching a search application + +[discrete] +[[search-application-api-searching-templates]] +===== Template search + +The simplest way to interact with a search application is to use the search template that's created and stored with it. +Each search application has a single template associated with it, which defines search criteria, parameters and defaults. + +You send search requests to a search application using the <>. + +With the default template, a search looks like this: + +[source,console] +---- +POST _application/search_application//_search +{ + "params": { + "query_string": "my first query" + } +} +---- +// TEST[skip:TODO] + +In this example, we've overridden the `query_string` parameter's default value of `*`. +Since we didn't specify `default_field` the value of this parameter will still be `*`. + +[discrete] +[[search-application-api-searching-alias]] +===== Alias search + +If you don't want to set up a search template for your search application, an alias will be created with the same name as your search application. +This may be helpful when experimenting with specific search queries that you want to use when building your search application's search template. + +If your search application's name is `my_search_application`, your alias will be `my_search_application`. +You can search this using the <>. + +[NOTE] +==== +You should use the Search Applications management APIs to update your application and _not_ directly use {es} APIs such as the alias API. +For example, use <> with the `indices` parameter. +This will automatically keep the associated alias up to date and ensure that indices are added to the search application correctly. +==== + +[discrete] +[[search-application-api-examples]] +==== Search template examples + +We have created a number of examples to explore specific use cases. +Use these as a starting point for creating your own search templates. 
+ +[discrete] +[[search-application-api-bm25-template]] +===== Text search example + +The following template supports a `multi_match` search over specified fields and boosts: + +[source,console] +---- +PUT _application/search_application/my_search_application +{ + "indices": ["my_index1", "my_index2"], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "query": { + "multi_match": { + "query": "{{query_string}}", + "fields": [{{#text_fields}}"{{name}}^{{boost}}",{{/text_fields}}] + } + }, + "explain": "{{explain}}", + "from": "{{from}}", + "size": "{{size}}" + } + """, + "params": { + "query_string": "*", + "text_fields": [ + {"name": "title", "boost": 10}, + {"name": "description", "boost": 5} + ], + "explain": false, + "from": 0, + "size": 10 + } + } + } +} +---- +// TEST[skip:TODO] + +A search query using this template might look like this: +[source,console] +---- +POST _application/search_application/my_search_application/_search +{ + "params": { + "size": 5, + "query_string": "mountain climbing", + "text_fields": [ + {"name": "title", "boost": 10}, + {"name": "description", "boost": 2}, + {"name": "state", "boost": 1} + ] + } +} +---- +// TEST[skip:TODO] + +The `text_fields` parameters can be overridden with new/different fields and boosts to experiment with the best configuration for your use case. +This template also supports pagination and `explain` via parameters. + +[discrete] +[[search-application-api-rrf-template]] +===== Text search + ELSER with RRF + +This example supports the <> method for combining BM25 and {ml-docs}/ml-nlp-elser.html[ELSER] searches. +Reciprocal Rank Fusion consistently improves the combined results of different search algorithms. +It outperforms all other ranking algorithms, and often surpasses the best individual results, without calibration. + +[source,console] +---- +PUT _application/search_application/my-search-app +{ + "indices": [ + "search-my-crawler" + ], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "sub_searches": [ + {{#text_fields}} + { + "query": { + "match": { + "{{.}}": "{{query_string}}" + } + } + }, + {{/text_fields}} + {{#elser_fields}} + { + "query": { + "text_expansion": { + "ml.inference.{{.}}_expanded.predicted_value": { + "model_text": "{{query_string}}", + "model_id": "" + } + } + } + }, + {{/elser_fields}} + ], + "rank": { + "rrf": { + "window_size": {{rrf.window_size}}, + "rank_constant": {{rrf.rank_constant}} + } + } + } + """, + "params": { + "elser_fields": ["title", "meta_description"], + "text_fields": ["title", "meta_description"], + "query_string": "", + "rrf": { + "window_size": 100, + "rank_constant": 60 + } + } + } + } +} +---- +// TEST[skip:TODO] + +NOTE: Replace `` with the model ID of your ELSER deployment. + +A sample query for this template will look like the following example: + +[source,console] +---- +POST _application/search_application/my-search-app/_search +{ + "params": { + "query_string": "What is the most popular brand of coffee sold in the United States?", + "elser_fields": ["title", "meta_description"], + "text_fields": ["title", "meta_description"], + "rrf": { + "window_size": 50, + "rank_constant": 25 + } + } +} +---- +// TEST[skip:TODO] + +[discrete] +[[search-application-api-catchall-template]] +===== Text search + ELSER + +The Elastic Learned Sparse EncodeR ({ml-docs}/ml-nlp-elser.html[ELSER]) improves search relevance through text-expansion, which enables semantic search. 
+This experimental template requires ELSER to be enabled for one or more fields. +Refer to <> for more information on how to use ELSER. +In this case, ELSER is enabled on the `title` and `description` fields. + +This example provides a single template that you can use for various search application scenarios: text search, ELSER, or all of the above. +It also provides a simple default `query_string` query if no parameters are specified. + +[source,console] +---- +PUT _application/search_application/my_search_application +{ + "indices": [ + "my_index1", + "my_index2" + ], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "query": { + "bool": { + "should": [ + {{#text}} + { + "multi_match": { + "query": "{{query_string}}", + "fields": [{{#text_fields}}"{{name}}^{{boost}}",{{/text_fields}}], + "boost": "{{text_query_boost}}" + } + }, + {{/text}} + {{#elser}} + {{#elser_fields}} + { + "text_expansion": { + "ml.inference.{{name}}_expanded.predicted_value": { + "model_text": "{{query_string}}", + "model_id": ".elser_model_1", + "boost": "{{boost}}" + } + } + }, + {{/elser_fields}} + { "bool": { "must": [] } }, + {{/elser}} + {{^text}} + {{^elser}} + { + "query_string": { + "query": "{{query_string}}", + "default_field": "{{default_field}}", + "default_operator": "{{default_operator}}", + "boost": "{{text_query_boost}}" + } + }, + {{/elser}} + {{/text}} + { "bool": { "must": [] } } + ], + "minimum_should_match": 1 + } + }, + "min_score": "{{min_score}}", + "explain": "{{explain}}", + "from": "{{from}}", + "size": "{{size}}" + } + """, + "params": { + "text": false, + "elser": false, + "elser_fields": [ + {"name": "title", "boost": 1}, + {"name": "description", "boost": 1} + ], + "text_fields": [ + {"name": "title", "boost": 10}, + {"name": "description", "boost": 5}, + {"name": "state", "boost": 1} + ], + "query_string": "*", + "text_query_boost": 4, + "default_field": "*", + "default_operator": "OR", + "explain": false, + "from": 0, + "size": 10, + "min_score": 0 + } + } + } +} +---- +// TEST[skip:TODO] + +A text search query using this template might look like this: +[source,console] +---- +POST _application/search_application/my_search_application/_search +{ + "params": { + "text": true, + "size": 5, + "query_string": "mountain climbing", + "text_fields": [ + {"name": "title", "boost": 10}, + {"name": "description", "boost": 5}, + {"name": "state", "boost": 1} + ] + } +} +---- +// TEST[skip:TODO] + +An ELSER search query using this template will look like the following example: +[source,console] +---- +POST _application/search_application/my_search_application/_search +{ + "params": { + "elser": true, + "query_string": "where is the best mountain climbing?", + "elser_fields": [ + {"name": "title", "boost": 1}, + {"name": "description", "boost": 1} + ] + } +} +---- +// TEST[skip:TODO] + +A combined text search and ELSER search query using this template will look like the following example: +[source,console] +---- +POST _application/search_application/my_search_application/_search +{ + "params": { + "elser": true, + "text": true, + "query_string": "where is the best mountain climbing?", + "elser_fields": [ + {"name": "title", "boost": 1}, + {"name": "description", "boost": 1} + ], + "text_query_boost": 4, + "min_score": 10 + } +} +---- +// TEST[skip:TODO] + +[TIP] +==== +Text search results and ELSER search results are expected to have significantly different scores in some cases, which makes ranking challenging. 
+To find the best search result mix for your dataset, we suggest experimenting with the boost values provided in the example template: + +* `text_query_boost` to boost the BM25 query as a whole +* {ref}/query-dsl-query-string-query.html#_boosting[`boost`] fields to boost individual text search fields +* <> parameter to omit significantly low confidence results + +The above boosts should be sufficient for many use cases, but there are cases when adding a <> query or <> to your template may be beneficial. +Remember to update your search application to use the new template using the <>. +==== + +Finally, a parameterless search using this template would fall back to a default search returning all documents: + +[source,console] +---- +POST _application/search_application/my_search_application/_search +---- +// TEST[skip:TODO] + +[discrete] +[[search-application-api-elser-template]] +===== ELSER search +This example supports a streamlined version of ELSER search. + +[source,console] +---- +PUT _application/search_application/my_search_application +{ + "indices": [ + "my_index1", + "my_index2" + ], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "query": { + "bool": { + "should": [ + {{#elser_fields}} + { + "text_expansion": { + "ml.inference.{{name}}_expanded.predicted_value": { + "model_text": "{{query_string}}", + "model_id": "" + } + } + }, + {{/elser_fields}} + ] + } + }, + "min_score": "{{min_score}}" + } + """, + "params": { + "query_string": "*", + "min_score": "10", + "elser_fields": [ + { + "name": "title" + }, + { + "name": "description" + } + ] + } + } + } +} +---- +// TEST[skip:TODO] + +NOTE: Replace `` with the model ID of your ELSER deployment. + +A sample query for this template will look like the following example: + +[source,console] +---- +POST _application/search_application/my_search_application/_search + { + "params": { + "query_string": "Where is the best place for mountain climbing?" 
+ } + } +---- +// TEST[skip:TODO] + + +[discrete] +[[search-applications-knn-template]] +===== kNN search +This example supports <> + +A template supporting exact kNN search will look like the following example: + +[source,console] +---- +PUT _application/search_application/my_search_application +{ + "indices": [ + "my_product_index" + ], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "query": { + "script_score": { + "query": { + "bool": { + "filter": { + "range": { + "{{field}}": { + "{{operator}}": {{value}} + } + } + } + } + }, + "script": { + "source": "cosineSimilarity({{#toJson}}query_vector{{/toJson}}, '{{dense_vector_field}}') + 1.0" + } + } + } + } + """, + "params": { + "field": "price", + "operator": "gte", + "value": 1000, + "dense_vector_field": "product-vector", + "query_vector": [] + } + } + } +} +---- +// TEST[skip:TODO] + +A search query using this template will look like the following example: +[source,console] +---- +POST _application/search_application/my_search_application/_search +{ + "params": { + "field": "price", + "operator": "gte", + "value": 500 + } +} +---- +// TEST[skip:TODO] + +A template supporting approximate kNN search will look like the following example: + +[source,console] +---- +PUT _application/search_application/my_search_application +{ + "indices": [ + "my_product_index" + ], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "knn": { + "field": "{{knn_field}}", + "query_vector": {{#toJson}}query_vector{{/toJson}}, + "k": "{{k}}", + "num_candidates": {{num_candidates}} + }, + "fields": {{#toJson}}fields{{/toJson}} + } + """, + "params": { + "knn_field": "image-vector", + "query_vector": [], + "k": 10, + "num_candidates": 100, + "fields": ["title", "file-type"] + } + } + } +} +---- +// TEST[skip:TODO] + +A search query using this template will look like the following example: +[source,console] +---- +POST _application/search_application/my_search_application/_search +{ + "params": { + "knn_field": "image-vector", + "query_vector": [-5, 9, -12], + "k": 10, + "num_candidates": 100, + "fields": ["title", "file-type"] + } +} + +---- +// TEST[skip:TODO] diff --git a/docs/reference/search/search-your-data/search-application-client.asciidoc b/docs/reference/search/search-your-data/search-application-client.asciidoc new file mode 100644 index 0000000000000..c7d4325fadea7 --- /dev/null +++ b/docs/reference/search/search-your-data/search-application-client.asciidoc @@ -0,0 +1,808 @@ +[[search-application-client]] +=== Build a search experience with the Search Application client +++++ +Search Application client guide +++++ + +This document is a how-to guide to building a search experience with a <>, using the https://github.com/elastic/search-application-client[Search Application client^]. +The client is a JavaScript library designed to be used in the browser. +You'll integrate this library into your web app to simplify querying your search application. + +[TIP] +==== +A https://github.com/elastic/search-application-client/blob/main/examples/sandbox/README.md[sandbox environment] is available for testing and experimenting with the `search-application-client` library. +Jump there if you'd like to try out the client without setting up your own web app. + + +Clone the https://github.com/elastic/search-application-client[repository] and follow the instructions in the README to get started. 
+==== + +[discrete] +[[search-application-client-client-goal]] +=== Goal + +This guide assumes you want to build a web app with the following search features: + +* Search bar and results with custom relevance +* Control over the presentation of results, such as inclusion/exclusion of fields and highlighting of matching terms +* UI controls such as facets, filters, sorts, pagination + +You can think of the search application as the "server side" that persists changes to {es}. +Your web app acts as the "client side" that queries the search application. +You'll be making edits to both your search application and your web app to complete the implementation. + +[discrete] +[[search-application-client-client-prerequisites]] +=== Prerequisites + +To follow this guide, you'll need: + +* An *Elastic deployment*, that satisfies the <> for running a search application. +** If you don't have an Elastic deployment, start a free trial on https://cloud.elastic.co[Elastic Cloud^]. +* A *search application*. +** Create and manage search applications in the <> or using the <>. +* A *web app* to query your search application, using https://github.com/elastic/search-application-client#installation[Search Application client]. + +[discrete] +[[search-application-client-client-configuration]] +=== Install and configure the client + +[discrete] +[[search-application-client-client-configuration-install]] +==== Install the client + +https://github.com/elastic/search-application-client/blob/main/README.md#installation[Install^] the client using npm, yarn, or a CDN. + +*Option 1: Using package manager* + +To install the client using *npm*, run the following command: + +[source, bash] +---- +npm install @elastic/search-application-client +---- + +To install the client using *yarn*, run the following command: + +[source, bash] +---- +yarn add @elastic/search-application-client +---- + +*Option 2: Using CDN with HTML ` +---- + +[discrete] +[[search-application-client-client-configuration-import]] +==== Import and initialize the client + +Once installed, you can import the client into your web app. +You'll need the following information to initialize the client: + +* The *name* of your search application +* The *URL endpoint* for your search application +* The *API key* for your search application + +Find this information on the *Connect* page in the {kib} UI. + +[discrete] +[[search-application-client-client-configuration-import-js]] +===== Option 1: Using JavaScript modules + +Use the following import statement: + +[source, js] +---- +import SearchApplicationClient from '@elastic/search-application-client'; +---- +// NOTCONSOLE + +Configure the client with your deployment details to start making search requests. +You can generate an API key on the *Connect* page in the {kib} UI. +Go to *Search > Search Applications >* *> Connect*. 
+You'll find the following information prepopulated to initialize the client: + +[source, js] +---- +import Client from '@elastic/search-application-client' + +const request = Client( + 'my-search-application', // search application name + 'url-from-connect-page', // url-host + 'api-key-from-connect-page', // api-key + { + // optional configuration + } +) +---- +// NOTCONSOLE + +Once configured you'll be able to make search requests to your search application using the https://github.com/elastic/search-application-client#api-reference[client API], like this: + +[source, js] +---- +const results = await request() + .query('star wars') + .search() +---- +// NOTCONSOLE + +[discrete] +[[search-application-client-client-configuration-import-cdn]] +===== Option 2: Using CDN + +Alternatively, if you're using a CDN, you can import the client using the following statement: + +[source, html] +---- + +---- + +Configure the client with your deployment details to start making search requests. +You can generate an API key on the *Connect* page in the {kib} UI. +Go to *Search > Search Applications >* *> Connect*. +You'll find the following information prepopulated to initialize the client: + +[source, html] +---- + +---- + +Once configured you'll be able to make search requests to your search application using the https://github.com/elastic/search-application-client#api-reference[client API], like this: + +[source, html] +---- + +---- + +[discrete] +[[search-application-client-client-template]] +=== Working with your search template + +The Search Application client is designed to work with any +<> you create. +You'll use the Search Application APIs to create and manage your search templates. + +[TIP] +==== +When working with the Search Application APIs to manage templates, we provide the API examples using {kibana-ref}/console-kibana.html[{kib} Console^] syntax. +==== + +Here's an example template: + +[source, console] +---- +PUT _application/search_application/my-example-app +{ + "indices": ["my-example-app"], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "query": { + "bool": { + "must": [ + {{#query}} + { + "query_string": { + "query": "{{query}}", + "search_fields": {{#toJson}}search_fields{{/toJson}} + } + } + {{/query}} + ] + } + } + } + """, + "params": { + "query": "", + "search_fields": "" + } + } + } +} +---- +// TEST[skip:TODO] + +This will allow you to add any template parameters you need to your template and then provide the values in your client request. +Use `addParameter` to inject actual values into your template parameters. + +For example, pass in values for `search_fields` like this: + +[source, js] +---- +const results = await request() + .query('star wars') // requires the template to use query parameter + .addParameter('search_fields', ['title', 'description']) + .search() +---- +// NOTCONSOLE + +[discrete] +[[search-application-client-client-template-example]] +==== Example template + +We recommend getting started with the https://github.com/elastic/search-application-client#boilerplate-template[boilerplate template^] provided in the client repository. +https://github.com/elastic/search-application-client/blob/main/bin/boilerplate_template.js[View this script^] to see how this is used. +The `dictionary` parameter is used to pass in a JSON schema definition that describes structure and validation rules for the request object. +This schema is important, because it restricts the use of certain features in the {es} query. 
+https://github.com/elastic/search-application-client/blob/main/bin/request_schema.json[View the schema^]. + +Each search functionality in this guide requires a feature included in this template. +These features require specific parameters to be present in the template: + +* Query: `query` +* Filters: `_es_filters` +* Faceting: `_es_filters` and `_es_aggs` +* Sorting: `_es_sort_fields` +* Pagination: `from` and `size` + +[discrete] +[[search-application-client-client-features]] +=== Search features + +We will explore all the essential basics you'll need for a search experience. +You'll learn how to implement them using your search application and query them using the client. + +[TIP] +==== +Refer to the https://github.com/elastic/search-application-client#api-reference[client repo^] for information on the available methods and their parameters. +==== + +[discrete] +[[search-application-client-client-features-relevance]] +==== Customizing relevance + +Our simple template uses `query_string` searching across all fields, but this may not suit your use case. +You can update the template to provide better relevance recall. + +In the below example, we’re using a `multi-match` query against our template, with `best_fields` and `phrase_prefix` queries targeting different search fields. + +[source, console] +---- +PUT _application/search_application/my-example-app +{ + "indices": ["example-index"], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "query": { + "bool": { + "must": [ + {{#query}} + { + "multi_match" : { + "query": "{{query}}", + "fields": [ "title^4", "plot", "actors", "directors" ] + } + }, + { + "multi_match" : { + "query": "{{query}}", + "type": "phrase_prefix", + "fields": [ "title^4", "plot"] + } + }, + {{/query}} + ], + "filter": {{#toJson}}_es_filters{{/toJson}} + } + }, + "aggs": {{#toJson}}_es_aggs{{/toJson}}, + "from": {{from}}, + "size": {{size}}, + "sort": {{#toJson}}_es_sort_fields{{/toJson}} + } + """, + "params": { + "query": "", + "_es_filters": {}, + "_es_aggs": {}, + "_es_sort_fields": {}, + "size": 10, + "from": 0 + }, + "dictionary": { + // add dictionary restricting + // _es_filters, _es_sort_fields & _es_aggs params + // Use example provided in repo: https://github.com/elastic/search-application-client/blob/main/bin/request_schema.json + } + } + } +} +---- +// TEST[skip:TODO] + +Refer to +//<> +for examples of different types of queries, including combinations of text search, kNN search, ELSER search, hybrid search with RRF, and more. + +*Use case: I want to dynamically adjust the search fields* + +If you need to adjust `search_fields` at query request time, you can add a new parameter to the template (for example: `search_fields`) and use the `addParameter` method to provide the fields to the template. + +*Use case: I want to boost results given a certain proximity to the user* + +You can add additional template parameters to send the geo-coordinates of the user. +Then use <> to boost documents which match a certain <> from the user. + +[discrete] +[[search-application-client-client-features-result-fields]] +=== Result fields + +By default, all fields are returned in the `_source` field. +To restrict the fields returned, specify the fields in the template. + +[source, console] +---- +PUT _application/search_application/my-example-app +{ + "indices": ["example-index"], + "template": { + "script": { + "lang": "mustache", + "source": """ + { + "query": { + "bool": { + "must": [ + {{#query}} + // ... 
+            {{/query}}
+            ],
+            "filter": {{#toJson}}_es_filters{{/toJson}}
+          }
+        },
+        "_source": {
+          "includes": ["title", "plot"]
+        },
+        "aggs": {{#toJson}}_es_aggs{{/toJson}},
+        "from": {{from}},
+        "size": {{size}},
+        "sort": {{#toJson}}_es_sort_fields{{/toJson}}
+      }
+      """,
+      "params": {
+        "query": "",
+        "_es_filters": {},
+        "_es_aggs": {},
+        "_es_sort_fields": {},
+        "size": 10,
+        "from": 0
+      },
+      "dictionary": {
+        // add dictionary restricting _es_filters and _es_aggs params
+        // Use the dictionary example provided in repo: https://github.com/elastic/search-application-client/blob/main/bin/request_schema.json
+      }
+    }
+  }
+}
+----
+// TEST[skip:TODO]
+
+*Use case: I want to dynamically adjust the result fields*
+
+If you need to adjust the fields returned at query request time, you can add a new parameter to the template (for example: `result_fields`) and use the `addParameter` method to provide the fields to the template.
+
+[discrete]
+[[search-application-client-client-features-highlight-snippets]]
+==== Highlighting and snippets
+
+Highlighting support is straightforward to add to the template.
+With the <>, you can specify which fields you want to highlight for matches.
+
+In the following example, we specify `title` and `plot` as the highlighted fields.
+`title` values are typically short, while `plot` values vary and tend to be longer.
+
+For `title`, we set `fragment_size` to `0`, which returns the entire field text when there is a highlight.
+For `plot`, we set `fragment_size` to `200`, so each highlighted fragment is up to 200 characters long.
+
+[source, console]
+----
+PUT _application/search_application/my-example-app
+{
+  "indices": ["example-index"],
+  "template": {
+    "script": {
+      "lang": "mustache",
+      "source": """
+      {
+        "query": {
+          "bool": {
+            "must": [
+            {{#query}}
+            // ...
+            {{/query}}
+            ],
+            "filter": {{#toJson}}_es_filters{{/toJson}}
+          }
+        },
+        "_source": {
+          "includes": ["title", "plot"]
+        },
+        "highlight": {
+          "fields": {
+            "title": { "fragment_size": 0 },
+            "plot": { "fragment_size": 200 }
+          }
+        },
+        "aggs": {{#toJson}}_es_aggs{{/toJson}},
+        "from": {{from}},
+        "size": {{size}},
+        "sort": {{#toJson}}_es_sort_fields{{/toJson}}
+      }
+      """,
+      "params": {
+        "query": "",
+        "_es_filters": {},
+        "_es_aggs": {},
+        "_es_sort_fields": {},
+        "size": 10,
+        "from": 0
+      },
+      "dictionary": {
+        // add dictionary restricting _es_filters and _es_aggs params
+        // Use the dictionary example provided in repo: https://github.com/elastic/search-application-client/blob/main/bin/request_schema.json
+      }
+    }
+  }
+}
+----
+// TEST[skip:TODO]
+
+If a match is found, the results will include a `highlight` field.
+For example:
+
+[source, js]
+----
+{
+  "hits": [
+    {
+      "_index": "movies",
+      "_type": "_doc",
+      "_id": "1",
+      "_score": 0.2876821,
+      "_source": {
+        "title": "The Great Gatsby",
+        "plot": "The Great Gatsby is a novel by F. Scott Fitzgerald that follows the story of Jay Gatsby, a wealthy and mysterious man, as he tries to win back the love of his life, Daisy Buchanan."
+      },
+      "highlight": {
+        "title": ["The Great Gatsby"],
+        "plot": [
+          "The Great Gatsby is a novel by F. Scott Fitzgerald that follows the story of Jay Gatsby, a wealthy and mysterious man, as he tries to win back the love of his life, Daisy Buchanan."
+        ]
+      }
+    }
+  ]
+}
+----
+// NOTCONSOLE
+
+[discrete]
+[[search-application-client-client-features-highlight-helpers]]
+===== Highlighting helper
+
+When displaying fields in the frontend, you first need to determine whether a field has a highlight.
+To simplify this, we provide a helper.
+
+[source, js]
+----
+import Client, { Highlight } from '@elastic/search-application-client'
+
+// example React component (the surrounding markup is illustrative)
+const ResultsList = ({ hits }) => {
+  return hits.map((hit) => (
+    <div key={hit._id}>
+      <h3>{Highlight(hit, "title")}</h3>
+      <p>{Highlight(hit, "plot")}</p>
+    </div>
+  ))
+}
+----
+// NOTCONSOLE
+
+[discrete]
+[[search-application-client-client-features-pagination]]
+==== Pagination
+
+To use pagination, set the page number and the page size.
+By default, the page size is 10.
+The `size` and `from` parameters allow you to control the page and the number of hits returned in the response.
+
+We can do this using the client with the `setSize` and `setFrom` methods.
+
+[source, js]
+----
+// page 1
+const results = await request()
+  .setSize(20)
+  .setFrom(0)
+  .search()
+
+// page 2
+const results = await request()
+  .setSize(20)
+  .setFrom(20)
+  .search()
+----
+// NOTCONSOLE
+
+[discrete]
+[[search-application-client-client-features-sorting]]
+==== Sorting
+
+To use sorting, specify the field name and the sort order, or pass `_score` to sort by relevance.
+Sorting requires the `_es_sort_fields` param in the search template.
+Refer to our <> to see where this is used.
+
+By default, the results are sorted by score.
+If you need to sort on a field other than the score, use the `setSort` method with an array of objects.
+
+[source, js]
+----
+const results = await request()
+  .setSort([{ year: 'asc' }, '_score'])
+  .search()
+----
+// NOTCONSOLE
+
+[discrete]
+[[search-application-client-client-features-filter]]
+==== Filtering
+
+The Search Application client also supports filters and facets.
+To use these, you need to add two parameters to the search template:
+
+* `_es_filters`
+* `_es_aggs`
+
+Refer to our <> to see where these are used.
+
+[discrete]
+[[search-application-client-client-features-filter-base]]
+===== Base filtering
+
+With a template that's configured to use filters, use the `setFilter` method to add filters to your query.
+
+The boilerplate template schema only supports `term`, `range`, `match`, `nested`, `geo_bounding_box` and `geo_distance` filters.
+If you need to use another clause type, you can update the template schema.
+
+Below is an example of using `setFilter`.
+
+[source, js]
+----
+// return only "star wars" movies that are rated PG
+const results = await request()
+  .query('star wars')
+  .setFilter({
+    term: {
+      'rated.enum': 'PG',
+    },
+  })
+  .search()
+----
+// NOTCONSOLE
+
+[discrete]
+[[search-application-client-client-features-facets]]
+==== Facets
+
+The client supports the ability to configure facets with your results.
+Specify facets in the client initialization call.
+For example, say we want to add facets for actors, directors and IMDB rating.
+
+[source, js]
+----
+const request = Client(
+  'my-example-app', // search application name
+  'https://d1bd36862ce54c7b903e2aacd4cd7f0a.us-east4.gcp.elastic-cloud.com:443', // api-host
+  'api-key-from-connect-page', // api-key
+  {
+    facets: {
+      actors: {
+        type: 'terms',
+        field: 'actors.keyword',
+        disjunctive: true,
+      },
+      directors: {
+        type: 'terms',
+        field: 'director.keyword',
+        size: 20,
+        disjunctive: true,
+      },
+      imdbrating: {
+        type: 'stats',
+        field: 'imdbrating',
+      },
+    },
+  }
+)
+----
+// NOTCONSOLE
+
+[NOTE]
+====
+In {es}, the `keyword` type is used for fields that need to be searchable in their exact, unmodified form.
+This means these queries are case-sensitive.
+We use this type for facets because facets require aggregating and filtering data based on exact values or terms.
+====
+
+Use the `addFacetFilter` method to add facet filters to your query.
+
+In the following example, we only want to return movies:
+
+* Featuring Harrison Ford as an actor
+* Directed by George Lucas _or_ Ridley Scott
+* With an IMDB rating greater than 7.5
+
+[source, js]
+----
+const results = await request()
+  .addFacetFilter('actors', 'Harrison Ford')
+  .addFacetFilter('directors', 'George Lucas')
+  .addFacetFilter('directors', 'Ridley Scott')
+  .addFacetFilter('imdbrating', {
+    gte: 7.5,
+  })
+  .search()
+----
+// NOTCONSOLE
+
+You can access the facets in the results:
+
+[source, js]
+----
+{
+  "took": 1,
+  "timed_out": false,
+  "_shards": {
+    "total": 1,
+    "successful": 1,
+    "skipped": 0,
+    "failed": 0
+  },
+  "hits": {
+    "total": {
+      "value": 2,
+      "relation": "eq"
+    },
+    "max_score": 0,
+    "hits": [
+      {
+        "_index": "imdb_movies",
+        "_id": "tt0076759",
+        "_score": 0,
+        "_source": {
+          "title": "Star Wars: Episode IV - A New Hope",
+          "actors": [
+            "Mark Hamill",
+            "Harrison Ford",
+            "Carrie Fisher",
+            "Peter Cushing"
+          ],
+          "plot": "Luke Skywalker joins forces with a Jedi Knight, a cocky pilot, a wookiee and two droids to save the universe from the Empire's world-destroying battle-station, while also attempting to rescue Princess Leia from the evil Darth Vader.",
+          "poster": "https://s3-eu-west-1.amazonaws.com/imdbimages/images/MV5BMTU4NTczODkwM15BMl5BanBnXkFtZTcwMzEyMTIyMw@@._V1_SX300.jpg"
+        }
+      },
+      {
+        "_index": "imdb_movies",
+        "_id": "tt0083658",
+        "_score": 0,
+        "_source": {
+          "title": "Blade Runner",
+          "actors": [
+            "Harrison Ford",
+            "Rutger Hauer",
+            "Sean Young",
+            "Edward James Olmos"
+          ],
+          "plot": "Deckard, a blade runner, has to track down and terminate 4 replicants who hijacked a ship in space and have returned to Earth seeking their maker.",
+          "poster": "https://s3-eu-west-1.amazonaws.com/imdbimages/images/MV5BMTA4MDQxNTk2NDheQTJeQWpwZ15BbWU3MDE2NjIyODk@._V1_SX300.jpg"
+        }
+      }
+    ]
+  },
+  "aggregations": {},
+  "facets": [
+    {
+      "name": "imdbrating_facet",
+      "stats": {
+        "min": 8.300000190734863,
+        "max": 8.800000190734863,
+        "avg": 8.550000190734863,
+        "sum": 17.100000381469727,
+        "count": 2
+      }
+    },
+    {
+      "name": "actors_facet",
+      "entries": [
+        {
+          "value": "Harrison Ford",
+          "count": 2
+        },
+        {
+          "value": "Carrie Fisher",
+          "count": 1
+        },
+        {
+          "value": "Edward James Olmos",
+          "count": 1
+        },
+        {
+          "value": "Mark Hamill",
+          "count": 1
+        },
+        {
+          "value": "Peter Cushing",
+          "count": 1
+        },
+        {
+          "value": "Rutger Hauer",
+          "count": 1
+        },
+        {
+          "value": "Sean Young",
+          "count": 1
+        }
+      ]
+    },
+    {
+      "name": "directors_facet",
+      "entries": [
+        {
+          "value": "Steven Spielberg",
+          "count": 3
+        },
+        {
+          "value": "Andrew Davis",
+          "count": 1
+        },
+        {
+          "value": "George Lucas",
+          "count": 1
+        },
+        {
+          "value": "Irvin Kershner",
+          "count": 1
+        },
+        {
+          "value": "Richard Marquand",
+          "count": 1
+        },
+        {
+          "value": "Ridley Scott",
+          "count": 1
+        }
+      ]
+    }
+  ]
+}
+----
+// NOTCONSOLE
diff --git a/docs/reference/search/search-your-data/search-application-overview.asciidoc b/docs/reference/search/search-your-data/search-application-overview.asciidoc
new file mode 100644
index 0000000000000..e12b55911740b
--- /dev/null
+++ b/docs/reference/search/search-your-data/search-application-overview.asciidoc
@@ -0,0 +1,132 @@
+[[search-application-overview]]
+== Elastic Search Applications
+
+++++
+Search Applications
+++++
+
+_Search Applications_ enable users to build search-powered applications that leverage the full power of {es} and its Query DSL, with a simplified user experience.
+Create search applications based on your {es} indices, build queries using search templates, and easily preview your results directly in the {kib} Search UI.
+
+You can also interact with your search applications using the <>.
+Search Applications are designed to simplify building unified search experiences across a range of enterprise search use cases, using the Elastic platform.
+
+.Search Applications documentation
+****
+Documentation for the Search Applications feature lives in two places:
+
+* The documentation in this section covers the basics of Search Applications, information about working with Search Applications in the {kib} UI, and use case examples.
+* The <> contains the API references for working with Search Applications programmatically.
+Jump there if you're only interested in the APIs.
+****
+
+[discrete]
+[[search-application-overview-prerequisites]]
+=== Availability and prerequisites
+
+The Search Applications feature was introduced in Elastic version *8.8.0*.
+
+[NOTE]
+====
+Search Applications is a beta feature.
+Beta features are subject to change and are not covered by the support SLA of generally available (GA) features.
+Elastic plans to promote this feature to GA in a future release.
+====
+
+This feature is available to all *Elastic Cloud* deployments.
+
+This feature is also available to *self-managed* deployments when Elastic subscription requirements are satisfied.
+View the requirements for this feature under the *Elastic Search* section of the https://www.elastic.co/subscriptions[Elastic Stack subscriptions^] page.
+
+Your deployment must include the {es} and {kib} services.
+
+Managing search applications requires the `manage_search_application` cluster privilege, and also requires the `manage` <> on all indices associated with the search application.
+
+[discrete]
+[[search-application-overview-summary]]
+=== Overview
+
+The {es} <> is powerful and flexible, but it comes with a steep learning curve.
+Complex queries are verbose and hard to understand for non-experts.
+We've designed search applications to be easier to search over, while retaining the flexibility of working with {es} indices.
+
+Search Applications use <> to simplify the process of building queries.
+Templates are defined when creating a search application, and can be customized according to your needs.
+Read <> for the details.
+
+[discrete]
+[[search-application-overview-get-started]]
+=== Get started
+
+[discrete]
+[[search-application-overview-get-started-ui]]
+==== Option 1: Get started in the UI
+
+You can create, build, and manage your search applications directly in the {kib} UI under *Search*.
+Make sure you have at least one {es} index to work with on your deployment.
+The indices underlying your search application are searched together, similar to how an <> searches over multiple indices.
+
+To create a new search application in {kib}:
+
+. Go to *Search > Search Applications*.
+. Select *Create*.
+. Select the {es} indices you want to use for your search application.
+. Name your search application.
+. Select *Create*.
+
+Your search application should now be available in the list of search applications.
+
+//[.screenshot]
+// image::../../images/search-applications/search-applications-create.png[Create search application screen]
+
+Once created, you can explore the documents in your search application under *Search > Search Applications >* _your-search-application_ > *Docs Explorer*.
+From there, you can expand a matching {es} document to see its full contents.
+ +//[.screenshot] +// image::../../images/search-applications/search-applications-docs-explorer.png[Docs explorer screen with search results] + +[discrete] +[[search-application-overview-get-started-api]] +==== Option 2: Get started with the API + +Use the {es} <> to create a search application. + + +The following example creates a search application named `my_search_application` that searches over the `my_search_index1` and `my_search_index2` indices, along with defining a simple search template (Refer to <>). + +[source,console] +---- +PUT /_application/search_application/my_search_application +{ + "indices": [ "my_search_index1", "my_search_index2" ], + "template": { + "script": { + "source": { + "query": { + "query_string": { + "query": "{{query_string}}", + "default_field": "{{default_field}}" + } + } + }, + "params": { + "query_string": "*", + "default_field": "*" + } + } + } +} +---- +// TEST[skip:TODO] + +[discrete] +[[search-application-overview-get-started-templates]] +==== Search templates + +Search templates are the heart of your search applications. +The <> created for a search application is very minimal, and you'll want to customize it to suit your needs. +<> contains a number of examples to get you started, including the default template, as well as templates for text search, semantic search and hybrid search. + +include::search-application-api.asciidoc[] +include::search-application-security.asciidoc[] +include::search-application-client.asciidoc[] diff --git a/docs/reference/search/search-your-data/search-application-security.asciidoc b/docs/reference/search/search-your-data/search-application-security.asciidoc new file mode 100644 index 0000000000000..17d3e367aa0d8 --- /dev/null +++ b/docs/reference/search/search-your-data/search-application-security.asciidoc @@ -0,0 +1,241 @@ +[#search-application-security] +=== Using search applications with untrusted clients +++++ +Security +++++ + +When building a frontend application for search use cases, there are two main approaches to returning search results: + +. The client (user's browser) makes API requests to the application backend, which in turn makes a request to {es}. +The {es} cluster is not exposed to the end user. +. *The client (user's browser) makes API requests directly to the search service - in this case the {es} cluster is reachable to the client.* + +This guide describes best practices when taking the second approach. +Specifically, we will explain how to use search applications with frontend apps that make direct requests to the <>. + +This approach has a few advantages: + +* No need to maintain a passthrough query system between frontend applications and {es} +* Direct requests to {es} result in faster response times +* Query configuration is managed in one place: your search application configuration in {es} + +We will cover: + +* <> +* <> +* <> + +[discrete] +[[search-application-security-key-restrictions]] +==== Using {es} API keys with role restrictions + +When frontend applications can make direct API requests to {es}, it's important to limit the operations they can perform. +In our case, frontend applications should only be able to call the Search Application *Search API*. +To ensure this, we will create {es} API keys with <>. +A role restriction is used to specify under what conditions a role should be effective. 
+ +The following {es} API key has access to the `website-product-search` search application, only through the Search Application Search API: + +[source,console] +---- +POST /_security/api_key +{ + "name": "my-restricted-api-key", + "expiration": "7d", + "role_descriptors": { + "my-restricted-role-descriptor": { + "indices": [ + { + "names": ["website-product-search"], <1> + "privileges": ["read"] + } + ], + "restriction": { + "workflows": ["search_application_query"] <2> + } + } + } +} +---- +// TEST[skip:TODO] + +<1> `indices.name` must be the name(s) of the Search Application(s), not the underlying {es} indices. +<2> `restriction.workflows` must be set to the concrete value `search_application_query`. + +[IMPORTANT] +==== +It is crucial to specify the workflow restriction. +Without this the {es} API key can directly call `_search` and issue arbitrary {es} queries. +This is insecure when dealing with untrusted clients. +==== + +The response will look like this: + +[source,console-result] +---- +{ + "id": "v1CCJYkBvb5Pg9T-_JgO", + "name": "my-restricted-api-key", + "expiration": 1689156288526, + "api_key": "ztVI-1Q4RjS8qFDxAVet5w", + "encoded": "djFDQ0pZa0J2YjVQZzlULV9KZ086enRWSS0xUTRSalM4cUZEeEFWZXQ1dw" +} +---- +// TEST[skip:TODO] + +The encoded value can then be directly used in the Authorization header. +Here's an example using cURL: + +[source,shell] +---- +curl -XPOST "http://localhost:9200/_application/search_application/website-product-search/_search" \ + -H "Content-Type: application/json" \ + -H "Authorization: ApiKey djFDQ0pZa0J2YjVQZzlULV9KZ086enRWSS0xUTRSalM4cUZEeEFWZXQ1dw" \ + -d '{ + "params": { + "field_name": "color", + "field_value": "red", + "agg_size": 5 + } +}' +---- +// NOTCONSOLE + +[TIP] +==== +If `expiration` is not present, by default {es} API keys never expire. +The API key can be invalidated using the <>. +==== + +[TIP] +==== +{es} API keys with role restrictions can also use field and document level security. +This further limits how frontend applications query a search application. +==== + +[discrete] +[[search-application-security-parameter-validation]] +==== Parameter validation with search applications + +Your search applications use <> to render queries. +The template parameters are passed to the Search Application Search API. +In the case of APIs used by frontend applications or untrusted clients, we need to have strict parameter validation. +Search applications define a JSON schema that describes which parameters the Search Application Search API allows. 
+
+The following example defines a search application with strict parameter validation:
+
+[source,console]
+----
+PUT _application/search_application/website-product-search
+{
+  "indices": [
+    "website-products"
+  ],
+  "template": {
+    "script": {
+      "source": {
+        "query": {
+          "term": {
+            "{{field_name}}": "{{field_value}}"
+          }
+        },
+        "aggs": {
+          "color_facet": {
+            "terms": {
+              "field": "color",
+              "size": "{{agg_size}}"
+            }
+          }
+        }
+      },
+      "params": {
+        "field_name": "product_name",
+        "field_value": "hello world",
+        "agg_size": 5
+      }
+    },
+    "dictionary": {
+      "properties": {
+        "field_name": {
+          "type": "string",
+          "enum": ["name", "color", "description"]
+        },
+        "field_value": {
+          "type": "string"
+        },
+        "agg_size": {
+          "type": "integer",
+          "minimum": 1,
+          "maximum": 10
+        }
+      },
+      "required": [
+        "field_name"
+      ],
+      "additionalProperties": false
+    }
+  }
+}
+----
+// TEST[skip:TODO]
+
+Using that definition, the Search Application Search API performs the following parameter validation:
+
+* It only accepts the `field_name`, `field_value` and `agg_size` parameters
+* `field_name` can only take the values `"name"`, `"color"` and `"description"`
+* `agg_size` defines the size of the term aggregation and it can only take values between `1` and `10`
+
+[discrete]
+[[search-application-security-cors]]
+==== Working with CORS
+
+Using this approach means that your user's browser will make requests to the {es} API directly.
+{es} supports https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS[Cross-Origin Resource Sharing (CORS)^], but this feature is disabled by default.
+Therefore, the browser will block these requests.
+
+There are two workarounds for this:
+
+* <>
+* <>
+
+[discrete]
+[[search-application-security-cors-elasticsearch]]
+===== Enable CORS on {es}
+
+This is the simplest option.
+Enable CORS on {es} by adding the following to your `elasticsearch.yml` file:
+
+[source,yaml]
+----
+http.cors.allow-origin: "*" # Only use unrestricted value for local development
+# Use a specific origin value in production, like `http.cors.allow-origin: "https://your-website-domain.example"`
+http.cors.enabled: true
+http.cors.allow-credentials: true
+http.cors.allow-methods: OPTIONS, POST
+http.cors.allow-headers: X-Requested-With, X-Auth-Token, Content-Type, Content-Length, Authorization, Access-Control-Allow-Headers, Accept
+----
+
+On Elastic Cloud, you can do this by {cloud}/ec-add-user-settings.html#ec-add-user-settings[editing your {es} user settings].
+
+. From your deployment menu, go to the *Edit* page.
+. In the *{es}* section, select *Manage user settings and extensions*.
+. Update the user settings with the configuration above.
+. Select *Save changes*.
+
+[discrete]
+[[search-application-security-cors-proxy-request]]
+===== Proxy the request through a server that supports CORS
+
+If you are unable to enable CORS on {es}, you can proxy the request through a server that supports CORS.
+This is more complicated, but is a viable option.
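+
+For example, a minimal Node.js proxy along these lines could forward search requests and add the CORS headers (a sketch assuming the `express`, `cors`, and `http-proxy-middleware` npm packages; adapt the origin and target URLs to your setup):
+
+[source, js]
+----
+// Minimal CORS proxy sketch: npm install express cors http-proxy-middleware
+const express = require('express')
+const cors = require('cors')
+const { createProxyMiddleware } = require('http-proxy-middleware')
+
+const app = express()
+
+// Only allow requests from your frontend's origin.
+app.use(cors({ origin: 'https://your-website-domain.example' }))
+
+// Forward only the Search Application search endpoint to {es}.
+app.use(
+  '/_application/search_application',
+  createProxyMiddleware({
+    target: 'https://your-deployment-url.example:9243', // your {es} endpoint
+    changeOrigin: true,
+  })
+)
+
+app.listen(3000)
+----
+// NOTCONSOLE
+
+The frontend application is then initialized with the proxy's URL instead of the {es} endpoint.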
+ +[discrete] +[[search-application-security-learn-more]] +==== Learn more + +* <> +* <> +* <> +* <> +** <> +** <> \ No newline at end of file diff --git a/docs/reference/search/search-your-data/search-multiple-indices.asciidoc b/docs/reference/search/search-your-data/search-multiple-indices.asciidoc index c257de0af96e8..4052097e73c91 100644 --- a/docs/reference/search/search-your-data/search-multiple-indices.asciidoc +++ b/docs/reference/search/search-your-data/search-multiple-indices.asciidoc @@ -1,5 +1,5 @@ [[search-multiple-indices]] -== Search multiple data streams and indices +=== Search multiple data streams and indices To search multiple data streams and indices, add them as comma-separated values in the <>'s request path. @@ -78,7 +78,7 @@ GET /*/_search [discrete] [[index-boost]] -=== Index boost +==== Index boost When searching multiple indices, you can use the `indices_boost` parameter to boost results from one or more specified indices. This is useful when hits diff --git a/docs/reference/search/search-your-data/search-shard-routing.asciidoc b/docs/reference/search/search-your-data/search-shard-routing.asciidoc index c959ab63f99c6..71e20c4f94071 100644 --- a/docs/reference/search/search-your-data/search-shard-routing.asciidoc +++ b/docs/reference/search/search-your-data/search-shard-routing.asciidoc @@ -1,5 +1,5 @@ [[search-shard-routing]] -== Search shard routing +=== Search shard routing To protect against hardware failure and increase search capacity, {es} can store copies of an index's data across multiple shards on multiple nodes. When running @@ -9,7 +9,7 @@ _search shard routing_ or _routing_. [discrete] [[search-adaptive-replica]] -=== Adaptive replica selection +==== Adaptive replica selection By default, {es} uses _adaptive replica selection_ to route search requests. This method selects an eligible node using <>, which routes the document to a @@ -151,7 +151,7 @@ GET /my-index-000001/_search?routing=my-routing-value,my-routing-value-2 [discrete] [[search-concurrency-and-parallelism]] -=== Search concurrency and parallelism +==== Search concurrency and parallelism By default, {es} doesn't reject search requests based on the number of shards the request hits. However, hitting a large number of shards can significantly diff --git a/docs/reference/search/search-your-data/search-template.asciidoc b/docs/reference/search/search-your-data/search-template.asciidoc index 6f6e0f2d81f72..9fb6ee85ffcca 100644 --- a/docs/reference/search/search-your-data/search-template.asciidoc +++ b/docs/reference/search/search-your-data/search-template.asciidoc @@ -1,6 +1,6 @@ [[search-template]] -== Search templates +=== Search templates A search template is a stored search you can run with different variables. @@ -13,7 +13,7 @@ your searches without modifying your app's code. [discrete] [[create-search-template]] -=== Create a search template +==== Create a search template To create or update a search template, use the <>. @@ -56,7 +56,7 @@ Settings that limit or disable scripts also affect search templates. [discrete] [[validate-search-template]] -=== Validate a search template +==== Validate a search template [[_validating_templates]] To test a template with different `params`, use the @@ -120,7 +120,7 @@ POST _render/template [discrete] [[run-templated-search]] -=== Run a templated search +==== Run a templated search To run a search with a search template, use the <>. You can specify different `params` with each request. @@ -187,7 +187,7 @@ response. 
[discrete] [[run-multiple-templated-searches]] -=== Run multiple templated searches +==== Run multiple templated searches To run multiple templated searches with a single request, use the <>. These requests often have @@ -206,7 +206,7 @@ GET my-index/_msearch/template [discrete] [[get-search-templates]] -=== Get search templates +==== Get search templates To retrieve a search template, use the <>. @@ -228,7 +228,7 @@ GET _cluster/state/metadata?pretty&filter_path=metadata.stored_scripts [discrete] [[delete-search-template]] -=== Delete a search template +==== Delete a search template To delete a search template, use the <>. @@ -241,7 +241,7 @@ DELETE _scripts/my-search-template [discrete] [[search-template-set-default-values]] -=== Set default values +==== Set default values To set a default value for a variable, use the following syntax: @@ -275,7 +275,7 @@ POST _render/template [discrete] [[search-template-url-encode-strings]] -=== URL encode strings +==== URL encode strings Use the `{{#url}}` function to URL encode a string. @@ -314,7 +314,7 @@ The template renders as: [discrete] [[search-template-concatenate-values]] -=== Concatenate values +==== Concatenate values Use the `{{#join}}` function to concatenate array values as a comma-delimited string. For example, the following template concatenates two email addresses. @@ -399,7 +399,7 @@ The template renders as: [discrete] [[search-template-convert-json]] -=== Convert to JSON +==== Convert to JSON Use the `{{#toJson}}` function to convert a variable value to its JSON representation. @@ -521,7 +521,7 @@ The template renders as: [discrete] [[search-template-use-conditions]] -=== Use conditions +==== Use conditions To create if conditions, use the following syntax: @@ -630,14 +630,15 @@ POST _render/template } } ---- + [[search-template-with-mustache-examples]] -=== Search template examples with Mustache +==== Search template examples with Mustache The mustache templating language defines various tag types you can use within templates. The following sections describe some of these tag types and provide examples of using them in {es} <>. [discrete] [[search-template-mustache-variable]] -=== Mustache variables +==== Mustache variables Mustache tags are typically enclosed in double curly brackets. A mustache variable: `{{my-variable}}` is a type of mustache tag. When you run a templated search, {es} replaces these variables with values from `params`. For example, consider the following search template: @@ -729,7 +730,7 @@ When rendered, template outputs as: [discrete] [[search-template-sections]] -=== Sections +==== Sections Sections are also a type of Mustache tags. You can use `sections` in your search template with a nested or unnested object. A section begins with `{{#my-section-variable}}` and ends with `{{/my-section-variable}}`. @@ -781,7 +782,7 @@ The template renders as: [discrete] [[search-template-lists]] -==== Lists +===== Lists You can pass a list of objects and loop over each item in your search template. For example, following search template combines <> and matches all the usernames: @@ -905,14 +906,14 @@ When rendered the template outputs: [discrete] [[search-template-lambdas]] -==== Lambdas +===== Lambdas {es} has pre-built custom functions to support converting the text into a specific format. To Learn more about usage of mustache lambdas, check out the examples in <>, <>, and <>. 
[discrete] [[search-template-inverted-section]] -=== Inverted sections +==== Inverted sections Inverted sections are useful when you want to set a value once. To use inverted sections use following syntax: @@ -980,7 +981,7 @@ When rendered, template output: [discrete] [[search-template-set-delimiter]] -=== Set delimiter +==== Set delimiter You can change the default delimiter: double curly brackets `{{my-variable}}` to any custom delimiter in your search template. For example, the following search template changes the default delimiter to a single round bracket `(query_string)`. @@ -1037,7 +1038,7 @@ When rendered, template outputs: [discrete] [[search-template-unsupported-features]] -=== Unsupported features +==== Unsupported features The following mustache features are not supported in {es} search templates: * Partials diff --git a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc index 6234fdb0481dd..ccd06b6681aad 100644 --- a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc +++ b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc @@ -1,5 +1,5 @@ [[search-using-query-rules]] -== Searching with query rules +=== Searching with query rules ++++ Searching with query rules ++++ @@ -30,13 +30,13 @@ Rules are defined using the <> and searched u [discrete] [[query-rule-definition]] -=== Rule definition +==== Rule definition When defining a rule, consider the following: [discrete] [[query-rule-type]] -==== Rule type +===== Rule type The type of rule we want to apply. For the moment there is a single rule type: @@ -45,7 +45,7 @@ The type of rule we want to apply. For the moment there is a single rule type: [discrete] [[query-rule-criteria]] -==== Rule criteria +===== Rule criteria The criteria for which this rule will match. Criteria is defined as `type`, `metadata`, and `values`. Allowed criteria types are: @@ -88,7 +88,7 @@ Allowed criteria types are: [discrete] [[query-rule-actions]] -==== Rule actions +===== Rule actions The actions to take when the rule matches a query: @@ -101,7 +101,7 @@ See <> for details. [discrete] [[add-query-rules]] -=== Add query rules +==== Add query rules You can add query rules using the <> call. This adds a ruleset containing one or more query rules that will be applied to queries that match their specified criteria. @@ -188,7 +188,7 @@ and the <> call to delete a query ruleset. [discrete] [[rule-query-search]] -=== Perform a rule query +==== Perform a rule query Once you have defined a query ruleset, you can search this ruleset using the <> query. An example query for the `my-ruleset` defined above is: diff --git a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc index 9da9e4a6a525d..fb6abd6d36099 100644 --- a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc +++ b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc @@ -71,7 +71,7 @@ A large number of inline synonyms increases cluster size unnecessarily and can l [discrete] [[synonyms-synonym-token-filters]] -=== Configure synonyms token filters and analyzers +==== Configure synonyms token filters and analyzers Once your synonyms sets are created, you can start configuring your token filters and analyzers to use them. 
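+
+For example, a search-time analyzer that applies a synonyms set might be configured like this (a sketch assuming a synonyms set named `my-synonyms-set` already exists):
+
+[source,console]
+----
+PUT /my-index-000001
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "my_synonym_filter": {
+          "type": "synonym_graph",
+          "synonyms_set": "my-synonyms-set",
+          "updateable": true
+        }
+      },
+      "analyzer": {
+        "my_search_analyzer": {
+          "tokenizer": "standard",
+          "filter": ["lowercase", "my_synonym_filter"]
+        }
+      }
+    }
+  },
+  "mappings": {
+    "properties": {
+      "title": {
+        "type": "text",
+        "analyzer": "standard",
+        "search_analyzer": "my_search_analyzer"
+      }
+    }
+  }
+}
+----
+// TEST[skip:TODO]
+
+Because the filter is marked `updateable`, it can only be used in a search-time analyzer (`search_analyzer`), which allows synonym changes to take effect without reindexing.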
diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index efde79ebb26aa..8bb83c3949076 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -1,4 +1,4 @@ -[[search-your-data]] +[[search-with-elasticsearch]] = Search your data [[search-query]] @@ -12,525 +12,39 @@ Depending on your data, you can use a query to get answers to questions like: * What users on my network ran `regsvr32.exe` within the last week? * What pages on my website contain a specific word or phrase? -A _search_ consists of one or more queries that are combined and sent to {es}. -Documents that match a search's queries are returned in the _hits_, or -_search results_, of the response. +{es} supports several search methods: -A search may also contain additional information used to better process its -queries. For example, a search may be limited to a specific index or only return -a specific number of results. +Search for exact values:: +Search for <> of numbers, dates, IPs, +or strings. -[discrete] -[[run-an-es-search]] -=== Run a search - -You can use the <> to search and -<> data stored in {es} data streams or indices. -The API's `query` request body parameter accepts queries written in -<>. - -The following request searches `my-index-000001` using a -<> query. This query matches documents with a -`user.id` value of `kimchy`. - -[source,console] ----- -GET /my-index-000001/_search -{ - "query": { - "match": { - "user.id": "kimchy" - } - } -} ----- -// TEST[setup:my_index] - -The API response returns the top 10 documents matching the query in the -`hits.hits` property. - -[source,console-result] ----- -{ - "took": 5, - "timed_out": false, - "_shards": { - "total": 1, - "successful": 1, - "skipped": 0, - "failed": 0 - }, - "hits": { - "total": { - "value": 1, - "relation": "eq" - }, - "max_score": 1.3862942, - "hits": [ - { - "_index": "my-index-000001", - "_id": "kxWFcnMByiguvud1Z8vC", - "_score": 1.3862942, - "_source": { - "@timestamp": "2099-11-15T14:12:12", - "http": { - "request": { - "method": "get" - }, - "response": { - "bytes": 1070000, - "status_code": 200 - }, - "version": "1.1" - }, - "message": "GET /search HTTP/1.1 200 1070000", - "source": { - "ip": "127.0.0.1" - }, - "user": { - "id": "kimchy" - } - } - } - ] - } -} ----- -// TESTRESPONSE[s/"took": 5/"took": "$body.took"/] -// TESTRESPONSE[s/"_id": "kxWFcnMByiguvud1Z8vC"/"_id": "$body.hits.hits.0._id"/] - -[discrete] -[[run-search-runtime-fields]] -=== Define fields that exist only in a query -Instead of indexing your data and then searching it, you can define -<> that only exist as part of your -search query. You specify a `runtime_mappings` section in your search request -to define the runtime field, which can optionally include a Painless script. - -For example, the following query defines a runtime field called `day_of_week`. -The included script calculates the day of the week based on the value of the -`@timestamp` field, and uses `emit` to return the calculated value. - -The query also includes a <> that operates on `day_of_week`. 
- -[source,console] ----- -GET /my-index-000001/_search -{ - "runtime_mappings": { - "day_of_week": { - "type": "keyword", - "script": { - "source": - """emit(doc['@timestamp'].value.dayOfWeekEnum - .getDisplayName(TextStyle.FULL, Locale.ROOT))""" - } - } - }, - "aggs": { - "day_of_week": { - "terms": { - "field": "day_of_week" - } - } - } -} ----- -// TEST[setup:my_index] - -The response includes an aggregation based on the `day_of_week` runtime field. -Under `buckets` is a `key` with a value of `Sunday`. The query dynamically -calculated this value based on the script defined in the `day_of_week` runtime -field without ever indexing the field. - -[source,console-result] ----- -{ - ... - *** - "aggregations" : { - "day_of_week" : { - "doc_count_error_upper_bound" : 0, - "sum_other_doc_count" : 0, - "buckets" : [ - { - "key" : "Sunday", - "doc_count" : 5 - } - ] - } - } -} ----- -// TESTRESPONSE[s/\.\.\./"took" : $body.took,"timed_out" : $body.timed_out,"_shards" : $body._shards,/] -// TESTRESPONSE[s/\*\*\*/"hits" : $body.hits,/] - -[discrete] -[[common-search-options]] -=== Common search options - -You can use the following options to customize your searches. - -*Query DSL* + -<> supports a variety of query types you can mix and match -to get the results you want. Query types include: - -* <> and other <>, which let you combine queries and match results based on multiple -criteria -* <> for filtering and finding exact matches -* <>, which are commonly used in search -engines -* <> and <> - -*Aggregations* + -You can use <> to get statistics and -other analytics for your search results. Aggregations help you answer questions -like: - -* What's the average response time for my servers? -* What are the top IP addresses hit by users on my network? -* What is the total transaction revenue by customer? - -*Search multiple data streams and indices* + -You can use comma-separated values and grep-like index patterns to search -several data streams and indices in the same request. You can even boost search -results from specific indices. See <>. - -*Paginate search results* + -By default, searches return only the top 10 matching hits. To retrieve -more or fewer documents, see <>. - -*Retrieve selected fields* + -The search response's `hits.hits` property includes the full document -<> for each hit. To retrieve only a subset of -the `_source` or other fields, see <>. - -*Sort search results* + -By default, search hits are sorted by `_score`, a <> that measures how well each document matches the query. To customize the -calculation of these scores, use the -<> query. To sort search hits by -other field values, see <>. - -*Run an async search* + -{es} searches are designed to run on large volumes of data quickly, often -returning results in milliseconds. For this reason, searches are -_synchronous_ by default. The search request waits for complete results before -returning a response. - -However, complete results can take longer for searches across -large data sets or <>. - -To avoid long waits, you can run an _asynchronous_, or _async_, search -instead. An <> lets you retrieve partial -results for a long-running search now and get complete results later. - -[discrete] -[[search-timeout]] -=== Search timeout - -By default, search requests don't time out. The request waits for complete -results from each shard before returning a response. - -While <> is designed for long-running -searches, you can also use the `timeout` parameter to specify a duration you'd -like to wait on each shard to complete. 
Each shard collects hits within the -specified time period. If collection isn't finished when the period ends, {es} -uses only the hits accumulated up to that point. The overall latency of a search -request depends on the number of shards needed for the search and the number of -concurrent shard requests. - -[source,console] ----- -GET /my-index-000001/_search -{ - "timeout": "2s", - "query": { - "match": { - "user.id": "kimchy" - } - } -} ----- -// TEST[setup:my_index] +Full-text search:: +Use <> to query <> and find documents that best match query terms. -To set a cluster-wide default timeout for all search requests, configure -`search.default_search_timeout` using the <>. This global timeout duration is used if no `timeout` argument is -passed in the request. If the global search timeout expires before the search -request finishes, the request is cancelled using <>. The `search.default_search_timeout` setting defaults to `-1` (no -timeout). +Vector search:: Store vectors in {es} and use <> to find vectors that are +similar, supporting use cases like <>. [discrete] -[[global-search-cancellation]] -=== Search cancellation - -You can cancel a search request using the <>. {es} also automatically cancels a search request when your client's HTTP -connection closes. We recommend you set up your client to close HTTP connections -when a search request is aborted or times out. - -[discrete] -[[track-total-hits]] -=== Track total hits - -Generally the total hit count can't be computed accurately without visiting all -matches, which is costly for queries that match lots of documents. The -`track_total_hits` parameter allows you to control how the total number of hits -should be tracked. -Given that it is often enough to have a lower bound of the number of hits, -such as "there are at least 10000 hits", the default is set to `10,000`. -This means that requests will count the total hit accurately up to `10,000` hits. -It is a good trade off to speed up searches if you don't need the accurate number -of hits after a certain threshold. - -When set to `true` the search response will always track the number of hits that -match the query accurately (e.g. `total.relation` will always be equal to `"eq"` -when `track_total_hits` is set to true). Otherwise the `"total.relation"` returned -in the `"total"` object in the search response determines how the `"total.value"` -should be interpreted. A value of `"gte"` means that the `"total.value"` is a -lower bound of the total hits that match the query and a value of `"eq"` indicates -that `"total.value"` is the accurate count. - -[source,console] --------------------------------------------------- -GET my-index-000001/_search -{ - "track_total_hits": true, - "query": { - "match" : { - "user.id" : "elkbee" - } - } -} --------------------------------------------------- -// TEST[setup:my_index] - -\... returns: - -[source,console-result] --------------------------------------------------- -{ - "_shards": ... - "timed_out": false, - "took": 100, - "hits": { - "max_score": 1.0, - "total" : { - "value": 2048, <1> - "relation": "eq" <2> - }, - "hits": ... 
- } -} --------------------------------------------------- -// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] -// TESTRESPONSE[s/"took": 100/"took": $body.took/] -// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] -// TESTRESPONSE[s/"value": 2048/"value": $body.hits.total.value/] -// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] - -<1> The total number of hits that match the query. -<2> The count is accurate (e.g. `"eq"` means equals). - -It is also possible to set `track_total_hits` to an integer. -For instance the following query will accurately track the total hit count that match -the query up to 100 documents: - -[source,console] --------------------------------------------------- -GET my-index-000001/_search -{ - "track_total_hits": 100, - "query": { - "match": { - "user.id": "elkbee" - } - } -} --------------------------------------------------- -// TEST[continued] - -The `hits.total.relation` in the response will indicate if the -value returned in `hits.total.value` is accurate (`"eq"`) or a lower -bound of the total (`"gte"`). - -For instance the following response: - -[source,console-result] --------------------------------------------------- -{ - "_shards": ... - "timed_out": false, - "took": 30, - "hits": { - "max_score": 1.0, - "total": { - "value": 42, <1> - "relation": "eq" <2> - }, - "hits": ... - } -} --------------------------------------------------- -// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] -// TESTRESPONSE[s/"took": 30/"took": $body.took/] -// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] -// TESTRESPONSE[s/"value": 42/"value": $body.hits.total.value/] -// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] - -<1> 42 documents match the query -<2> and the count is accurate (`"eq"`) - -\... indicates that the number of hits returned in the `total` -is accurate. - -If the total number of hits that match the query is greater than the -value set in `track_total_hits`, the total hits in the response -will indicate that the returned value is a lower bound: - -[source,console-result] --------------------------------------------------- -{ - "_shards": ... - "hits": { - "max_score": 1.0, - "total": { - "value": 100, <1> - "relation": "gte" <2> - }, - "hits": ... - } -} --------------------------------------------------- -// TESTRESPONSE[skip:response is already tested in the previous snippet] - -<1> There are at least 100 documents that match the query -<2> This is a lower bound (`"gte"`). - -If you don't need to track the total number of hits at all you can improve query -times by setting this option to `false`: - -[source,console] --------------------------------------------------- -GET my-index-000001/_search -{ - "track_total_hits": false, - "query": { - "match": { - "user.id": "elkbee" - } - } -} --------------------------------------------------- -// TEST[continued] - -\... returns: - -[source,console-result] --------------------------------------------------- -{ - "_shards": ... - "timed_out": false, - "took": 10, - "hits": { <1> - "max_score": 1.0, - "hits": ... - } -} --------------------------------------------------- -// TESTRESPONSE[s/"_shards": \.\.\./"_shards": "$body._shards",/] -// TESTRESPONSE[s/"took": 10/"took": $body.took/] -// TESTRESPONSE[s/"max_score": 1\.0/"max_score": $body.hits.max_score/] -// TESTRESPONSE[s/"hits": \.\.\./"hits": "$body.hits.hits"/] - -<1> The total number of hits is unknown. 
- -Finally you can force an accurate count by setting `"track_total_hits"` -to `true` in the request. - -[discrete] -[[quickly-check-for-matching-docs]] -=== Quickly check for matching docs - -If you only want to know if there are any documents matching a -specific query, you can set the `size` to `0` to indicate that we are not -interested in the search results. You can also set `terminate_after` to `1` -to indicate that the query execution can be terminated whenever the first -matching document was found (per shard). - -[source,console] --------------------------------------------------- -GET /_search?q=user.id:elkbee&size=0&terminate_after=1 --------------------------------------------------- -// TEST[setup:my_index] - -NOTE: `terminate_after` is always applied **after** the -<> and stops the query as well as the aggregation -executions when enough hits have been collected on the shard. Though the doc -count on aggregations may not reflect the `hits.total` in the response since -aggregations are applied **before** the post filtering. +=== Run a search -The response will not contain any hits as the `size` was set to `0`. The -`hits.total` will be either equal to `0`, indicating that there were no -matching documents, or greater than `0` meaning that there were at least -as many documents matching the query when it was early terminated. -Also if the query was terminated early, the `terminated_early` flag will -be set to `true` in the response. Some queries are able to retrieve the hits -count directly from the index statistics, which is much faster as it does -not require executing the query. In those situations, no documents are -collected, the returned `total.hits` will be higher than `terminate_after`, -and `terminated_early` will be set to `false`. +To run a search request, you can use the search API or Search Applications. -[source,console-result] --------------------------------------------------- -{ - "took": 3, - "timed_out": false, - "terminated_early": true, - "_shards": { - "total": 1, - "successful": 1, - "skipped" : 0, - "failed": 0 - }, - "hits": { - "total" : { - "value": 1, - "relation": "eq" - }, - "max_score": null, - "hits": [] - } -} --------------------------------------------------- -// TESTRESPONSE[s/"took": 3/"took": $body.took/] -// TESTRESPONSE[s/"terminated_early": true/"terminated_early": $body.terminated_early/] -// TESTRESPONSE[s/"value": 1/"value": $body.hits.total.value/] +<>:: +The <> enables you to search and +<> data stored in {es} using a query language +called the <>. -The `took` time in the response contains the milliseconds that this request -took for processing, beginning quickly after the node received the query, up -until all search related work is done and before the above JSON is returned -to the client. This means it includes the time spent waiting in thread pools, -executing a distributed search across the whole cluster and gathering all the -results. +<>:: +Search Applications enable you to leverage the full power of {es} and its Query +DSL, with a simplified user experience. Create search applications based on your +{es} indices, build queries using search templates, and easily preview your +results directly in the Kibana Search UI. 
-include::collapse-search-results.asciidoc[] -include::filter-search-results.asciidoc[] -include::highlighting.asciidoc[] -include::long-running-searches.asciidoc[] -include::near-real-time.asciidoc[] -include::paginate-search-results.asciidoc[] -include::retrieve-inner-hits.asciidoc[] -include::retrieve-selected-fields.asciidoc[] -include::search-across-clusters.asciidoc[] -include::search-multiple-indices.asciidoc[] -include::search-shard-routing.asciidoc[] -include::search-template.asciidoc[] -include::search-with-synonyms.asciidoc[] -include::sort-search-results.asciidoc[] +include::search-api.asciidoc[] +include::search-application-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] -include::search-using-query-rules.asciidoc[] +include::search-across-clusters.asciidoc[] +include::search-with-synonyms.asciidoc[] \ No newline at end of file diff --git a/docs/reference/search/search-your-data/sort-search-results.asciidoc b/docs/reference/search/search-your-data/sort-search-results.asciidoc index d0e59641d5076..3e32573d7d8ae 100644 --- a/docs/reference/search/search-your-data/sort-search-results.asciidoc +++ b/docs/reference/search/search-your-data/sort-search-results.asciidoc @@ -1,5 +1,5 @@ [[sort-search-results]] -== Sort search results +=== Sort search results Allows you to add one or more sorts on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special @@ -49,7 +49,7 @@ So if you don't care about the order in which documents are returned, then you should sort by `_doc`. This especially helps when <>. [discrete] -=== Sort Values +==== Sort values The search response includes `sort` values for each document. Use the `format` parameter to specify a <> for the `sort` @@ -72,7 +72,7 @@ GET /my-index-000001/_search // TEST[continued] [discrete] -=== Sort Order +==== Sort order The `order` option can have the following values: @@ -84,7 +84,7 @@ The order defaults to `desc` when sorting on the `_score`, and defaults to `asc` when sorting on anything else. [discrete] -=== Sort mode option +==== Sort mode option Elasticsearch supports sorting by array or multi-valued fields. The `mode` option controls what array value is picked for sorting the document it belongs @@ -105,7 +105,7 @@ is picked. The default sort mode in the descending order is `max` -- the highest value is picked. [discrete] -==== Sort mode example usage +===== Sort mode example usage In the example below the field price has multiple prices per document. In this case the result hits will be sorted by price ascending based on @@ -131,7 +131,7 @@ POST /_search -------------------------------------------------- [discrete] -=== Sorting numeric fields +==== Sorting numeric fields For numeric fields it is also possible to cast the values from one type to another using the `numeric_type` option. @@ -250,7 +250,7 @@ To avoid overflow, the conversion to `date_nanos` cannot be applied on dates bef [discrete] [[nested-sorting]] -=== Sorting within nested objects. +==== Sorting within nested objects. Elasticsearch also supports sorting by fields that are inside one or more nested objects. The sorting by nested @@ -277,7 +277,7 @@ NOTE: Elasticsearch will throw an error if a nested field is defined in a sort w a `nested` context. [discrete] -==== Nested sorting examples +===== Nested sorting examples In the below example `offer` is a field of type `nested`. 
The nested `path` needs to be specified; otherwise, Elasticsearch doesn't know on what nested level sort values need to be captured. @@ -356,7 +356,7 @@ Nested sorting is also supported when sorting by scripts and sorting by geo distance. [discrete] -=== Missing Values +==== Missing values The `missing` parameter specifies how docs which are missing the sort field should be treated: The `missing` value can be @@ -383,7 +383,7 @@ NOTE: If a nested inner object doesn't match with the `nested.filter` then a missing value is used. [discrete] -=== Ignoring Unmapped Fields +==== Ignoring unmapped fields By default, the search request will fail if there is no mapping associated with a field. The `unmapped_type` option allows you to ignore @@ -410,7 +410,7 @@ then Elasticsearch will handle it as if there was a mapping of type [discrete] [[geo-sorting]] -=== Geo Distance Sorting +==== Geo distance sorting Allow to sort by `_geo_distance`. Here is an example, assuming `pin.location` is a field of type `geo_point`: @@ -466,7 +466,7 @@ have values for the field that is used for distance computation. The following formats are supported in providing the coordinates: [discrete] -==== Lat Lon as Properties +===== Lat lon as properties [source,console] -------------------------------------------------- @@ -491,7 +491,7 @@ GET /_search -------------------------------------------------- [discrete] -==== Lat Lon as WKT String +===== Lat lon as WKT string Format in https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text]. @@ -515,7 +515,7 @@ GET /_search -------------------------------------------------- [discrete] -==== Geohash +===== Geohash [source,console] -------------------------------------------------- @@ -537,7 +537,7 @@ GET /_search -------------------------------------------------- [discrete] -==== Lat Lon as Array +===== Lat lon as array Format in `[lon, lat]`, note, the order of lon/lat here in order to conform with http://geojson.org/[GeoJSON]. @@ -562,7 +562,7 @@ GET /_search -------------------------------------------------- [discrete] -=== Multiple reference points +==== Multiple reference points Multiple geo points can be passed as an array containing any `geo_point` format, for example @@ -592,7 +592,7 @@ The final distance for a document will then be `min`/`max`/`avg` (defined via `m [discrete] [[script-based-sorting]] -=== Script Based Sorting +==== Script based sorting Allow to sort based on custom scripts, here is an example: @@ -620,7 +620,7 @@ GET /_search -------------------------------------------------- [discrete] -=== Track Scores +==== Track scores When sorting on a field, scores are not computed. By setting `track_scores` to true, scores will still be computed and tracked. @@ -642,7 +642,7 @@ GET /_search -------------------------------------------------- [discrete] -=== Memory Considerations +==== Memory considerations When sorting, the relevant sorted field values are loaded into memory. 
This means that per shard, there should be enough memory to contain From 6f71b3ae2cdab918e47e3b22e3ae369775468705 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 10 Oct 2023 10:08:12 +0100 Subject: [PATCH 101/176] [ML] Make Elser model version configurable (#100345) Adds a service setting to make the version of the ELSER model used configurable and support ELSER v2 --- .../org/elasticsearch/TransportVersions.java | 2 +- .../inference/InferenceService.java | 13 +- .../xpack/core/ml/MachineLearningField.java | 15 ++ .../utils}/MlPlatformArchitecturesUtil.java | 23 ++-- .../MlPlatformArchitecturesUtilTests.java | 4 +- .../integration/ModelRegistryIT.java | 8 +- .../TestInferenceServicePlugin.java | 8 +- .../TransportPutInferenceModelAction.java | 49 ++++++- .../services/elser/ElserMlNodeService.java | 95 ++++++++----- .../elser/ElserMlNodeServiceSettings.java | 62 ++++++++- .../ElserMlNodeServiceSettingsTests.java | 65 ++++++++- .../elser/ElserMlNodeServiceTests.java | 129 +++++++++++------- .../xpack/ml/MachineLearning.java | 17 +-- .../ml/action/TransportMlMemoryAction.java | 2 +- .../TransportPutTrainedModelAction.java | 10 +- .../MlMemoryAutoscalingDecider.java | 6 +- .../TrainedModelAssignmentClusterService.java | 19 ++- .../deployment/DeploymentManager.java | 8 +- .../AbstractJobPersistentTasksExecutor.java | 2 +- .../ml/utils/NativeMemoryCalculator.java | 4 +- ...ortStartDataFrameAnalyticsActionTests.java | 3 +- .../MlAutoscalingDeciderServiceTests.java | 3 +- .../MlMemoryAutoscalingDeciderTests.java | 3 +- ...nedModelAssignmentClusterServiceTests.java | 3 +- .../OpenJobPersistentTasksExecutorTests.java | 3 +- .../ml/utils/NativeMemoryCalculatorTests.java | 2 +- 26 files changed, 406 insertions(+), 152 deletions(-) rename x-pack/plugin/{ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment => core/src/main/java/org/elasticsearch/xpack/core/ml/utils}/MlPlatformArchitecturesUtil.java (88%) rename x-pack/plugin/{ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment => core/src/test/java/org/elasticsearch/xpack/core/ml/utils}/MlPlatformArchitecturesUtilTests.java (98%) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 373dab307f378..6267fb3b86ae4 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -136,7 +136,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_PACKAGE_LOADER_PLATFORM_ADDED = def(8_512_00_0); public static final TransportVersion PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME = def(8_513_00_0); public static final TransportVersion UNIVERSAL_PROFILING_LICENSE_ADDED = def(8_514_00_0); - + public static final TransportVersion ELSER_SERVICE_MODEL_VERSION_ADDED = def(8_515_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index db38ca1e037a1..8d8bc08f5b0fa 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import java.util.Map; +import java.util.Set; public interface InferenceService { @@ -29,9 +30,11 @@ public interface InferenceService { * @param modelId Model Id * @param taskType The model task type * @param config Configuration options including the secrets + * @param platfromArchitectures The Set of platform architectures (OS name and hardware architecture) + * the cluster nodes and models are running on. * @return The parsed {@link Model} */ - Model parseRequestConfig(String modelId, TaskType taskType, Map<String, Object> config); + Model parseRequestConfig(String modelId, TaskType taskType, Map<String, Object> config, Set<String> platfromArchitectures); /** * Parse model configuration from {@code config map} from persisted storage and return the parsed {@link Model}. This requires that @@ -64,4 +67,12 @@ public interface InferenceService { * @param listener The listener */ void start(Model model, ActionListener<Boolean> listener); + + /** + * Return true if this model is hosted in the local Elasticsearch cluster + * @return True if in cluster + */ + default boolean isInClusterService() { + return false; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java index 8bc0fbbd979d3..3e61f6b4e9258 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java @@ -36,6 +36,21 @@ public final class MachineLearningField { Setting.Property.Dynamic, Setting.Property.NodeScope ); + + /** + * This boolean value indicates if `max_machine_memory_percent` should be ignored and an automatic calculation is used instead. + * + * This calculation takes into account total node size and the size of the JVM on that node. + * + * If the calculation fails, we fall back to `max_machine_memory_percent`.
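+ *
+ * An illustrative consumer (editor's sketch, not part of this patch; {@code autoCalculatedBytes}
+ * and {@code totalBytes} are hypothetical placeholder helpers):
+ * <pre>
+ * boolean useAuto = MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT.get(settings);
+ * long mlNativeBytes = useAuto ? autoCalculatedBytes(node) : totalBytes(node) * maxMachineMemoryPercent / 100;
+ * </pre>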
+ */ + public static final Setting<Boolean> USE_AUTO_MACHINE_MEMORY_PERCENT = Setting.boolSetting( + "xpack.ml.use_auto_machine_memory_percent", + false, + Setting.Property.OperatorDynamic, + Setting.Property.NodeScope + ); + public static final TimeValue STATE_PERSIST_RESTORE_TIMEOUT = TimeValue.timeValueMinutes(30); public static final String ML_FEATURE_FAMILY = "machine-learning"; public static final LicensedFeature.Momentary ML_API_FEATURE = LicensedFeature.momentary( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/MlPlatformArchitecturesUtil.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlPlatformArchitecturesUtil.java similarity index 88% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/MlPlatformArchitecturesUtil.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlPlatformArchitecturesUtil.java index bd382835c2bd0..48ded3a4c0c45 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/MlPlatformArchitecturesUtil.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlPlatformArchitecturesUtil.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.deployment; +package org.elasticsearch.xpack.core.ml.utils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; @@ -16,13 +16,12 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.monitor.os.OsInfo; import org.elasticsearch.plugins.Platforms; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; -import org.elasticsearch.xpack.ml.MachineLearning; import java.util.Iterator; import java.util.Objects; import java.util.Set; +import java.util.concurrent.ExecutorService; import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -31,9 +30,13 @@ public class MlPlatformArchitecturesUtil { - public static void getMlNodesArchitecturesSet(ActionListener<Set<String>> architecturesListener, Client client, ThreadPool threadPool) { + public static void getMlNodesArchitecturesSet( + ActionListener<Set<String>> architecturesListener, + Client client, + ExecutorService executor + ) { ActionListener<NodesInfoResponse> listener = MlPlatformArchitecturesUtil.getArchitecturesSetFromNodesInfoResponseListener( - threadPool, + executor, architecturesListener ); @@ -42,13 +45,11 @@ public static void getMlNodesArchitecturesSet(ActionListener<Set<String>> archit } static ActionListener<NodesInfoResponse> getArchitecturesSetFromNodesInfoResponseListener( - ThreadPool threadPool, + ExecutorService executor, ActionListener<Set<String>> architecturesListener ) { return ActionListener.wrap(nodesInfoResponse -> { - threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - architecturesListener.onResponse(getArchitecturesSetFromNodesInfoResponse(nodesInfoResponse)); - }); + executor.execute(() -> { architecturesListener.onResponse(getArchitecturesSetFromNodesInfoResponse(nodesInfoResponse)); }); }, architecturesListener::onFailure); } @@ -70,7 +71,7 @@ private static Set<String> getArchitecturesSetFromNodesInfoResponse(NodesInfoRes public static void verifyMlNodesAndModelArchitectures( ActionListener<TrainedModelConfig> successOrFailureListener, Client client, - ThreadPool threadPool, + ExecutorService executor, TrainedModelConfig configToReturn ) { String modelID = configToReturn.getModelId(); @@ -81,7 +82,7 @@ public static void
verifyMlNodesAndModelArchitectures( successOrFailureListener.onResponse(configToReturn); }, successOrFailureListener::onFailure); - getMlNodesArchitecturesSet(architecturesListener, client, threadPool); + getMlNodesArchitecturesSet(architecturesListener, client, executor); } static void verifyMlNodesAndModelArchitectures(Set architectures, String modelPlatformArchitecture, String modelID) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/MlPlatformArchitecturesUtilTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlPlatformArchitecturesUtilTests.java similarity index 98% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/MlPlatformArchitecturesUtilTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlPlatformArchitecturesUtilTests.java index 28fc3db10cbe8..c4f3310689597 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/MlPlatformArchitecturesUtilTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlPlatformArchitecturesUtilTests.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.deployment; +package org.elasticsearch.xpack.core.ml.utils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -58,7 +58,7 @@ public void testGetNodesOsArchitectures() throws InterruptedException { @Override public void accept(ActionListener> setActionListener) { final ActionListener nodesInfoResponseActionListener = MlPlatformArchitecturesUtil - .getArchitecturesSetFromNodesInfoResponseListener(threadPool, setActionListener); + .getArchitecturesSetFromNodesInfoResponseListener(threadPool.executor("utility_thread"), setActionListener); nodesInfoResponseActionListener.onResponse(mockNodesInfoResponse); } diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java index 9f079afaa24e5..520a4cc5c0526 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/ModelRegistryIT.java @@ -9,11 +9,13 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ServiceSettings; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.inference.TaskType; +import org.elasticsearch.plugins.InferenceServicePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -39,6 +41,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class ModelRegistryIT extends ESSingleNodeTestCase { @@ -102,8 +105,9 @@ public void testGetModel() throws Exception { UnparsedModel unparsedModel = UnparsedModel.unparsedModelFromMap(modelHolder.get().config(), modelHolder.get().secrets()); 
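// Editor's note: the assertions below check a full persist-then-parse round trip; whatever was written to storage must rebuild the same service name and model settings.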
assertEquals(model.getConfigurations().getService(), unparsedModel.service()); - ElserMlNodeModel roundTripModel = ElserMlNodeService.parseConfig( - false, + + var elserService = new ElserMlNodeService(new InferenceServicePlugin.InferenceServiceFactoryContext(mock(Client.class))); + ElserMlNodeModel roundTripModel = elserService.parsePersistedConfig( unparsedModel.modelId(), unparsedModel.taskType(), unparsedModel.settings(), diff --git a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/TestInferenceServicePlugin.java b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/TestInferenceServicePlugin.java index 61837336f291b..b72fa99efaf72 100644 --- a/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/TestInferenceServicePlugin.java +++ b/x-pack/plugin/inference/src/internalClusterTest/java/org/elasticsearch/xpack/inference/integration/TestInferenceServicePlugin.java @@ -33,6 +33,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Set; import static org.elasticsearch.xpack.inference.services.MapParsingUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.MapParsingUtils.throwIfNotEmptyMap; @@ -79,7 +80,12 @@ public String name() { } @Override - public TestServiceModel parseRequestConfig(String modelId, TaskType taskType, Map config) { + public TestServiceModel parseRequestConfig( + String modelId, + TaskType taskType, + Map config, + Set platfromArchitectures + ) { Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); var serviceSettings = TestServiceSettings.fromMap(serviceSettingsMap); var secretSettings = TestSecretSettings.fromMap(serviceSettingsMap); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java index b0995e5405b2f..58f781d99b26a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java @@ -11,28 +11,35 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceRegistry; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; +import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import java.io.IOException; import java.util.Map; +import java.util.Set; public class TransportPutInferenceModelAction extends TransportMasterNodeAction< PutInferenceModelAction.Request, @@ -40,6 +47,7 @@ public class TransportPutInferenceModelAction extends TransportMasterNodeAction< private final ModelRegistry modelRegistry; private final InferenceServiceRegistry serviceRegistry; + private final Client client; @Inject public TransportPutInferenceModelAction( @@ -49,7 +57,8 @@ public TransportPutInferenceModelAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ModelRegistry modelRegistry, - InferenceServiceRegistry serviceRegistry + InferenceServiceRegistry serviceRegistry, + Client client ) { super( PutInferenceModelAction.NAME, @@ -64,6 +73,7 @@ public TransportPutInferenceModelAction( ); this.modelRegistry = modelRegistry; this.serviceRegistry = serviceRegistry; + this.client = client; } @Override @@ -87,12 +97,33 @@ protected void masterOperation( return; } - var model = service.get().parseRequestConfig(request.getModelId(), request.getTaskType(), requestAsMap); + if (service.get().isInClusterService()) { + // Find the cluster platform as the service may need that + // information when creating the model + MlPlatformArchitecturesUtil.getMlNodesArchitecturesSet(ActionListener.wrap(architectures -> { + if (architectures.isEmpty() && clusterIsInElasticCloud(clusterService.getClusterSettings())) { + // In Elastic cloud ml nodes run on Linux x86 + architectures = Set.of("linux-x86_64"); + } + // parse and store in every case so the listener always completes + parseAndStoreModel(service.get(), request.getModelId(), request.getTaskType(), requestAsMap, architectures, listener); + }, listener::onFailure), client, threadPool.executor(InferencePlugin.UTILITY_THREAD_POOL_NAME)); + } else { + // Not an in cluster service, it does not care about the cluster platform + parseAndStoreModel(service.get(), request.getModelId(), request.getTaskType(), requestAsMap, Set.of(), listener); + } + } + + private void parseAndStoreModel( + InferenceService service, + String modelId, + TaskType taskType, + Map<String, Object> config, + Set<String> platfromArchitectures, + ActionListener<PutInferenceModelAction.Response> listener + ) { + var model = service.parseRequestConfig(modelId, taskType, config, platfromArchitectures); // model is valid good to persist then start - this.modelRegistry.storeModel( - model, - ActionListener.wrap(r -> { startModel(service.get(), model, listener); }, listener::onFailure) - ); + this.modelRegistry.storeModel(model, ActionListener.wrap(r -> { startModel(service, model, listener); }, listener::onFailure)); } private static void startModel(InferenceService service, Model model, ActionListener<PutInferenceModelAction.Response> listener) { @@ -121,4 +152,10 @@ private Map<String, Object> requestToMap(PutInferenceModelAction.Request request protected ClusterBlockException checkBlock(PutInferenceModelAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } + + static boolean clusterIsInElasticCloud(ClusterSettings settings) { + // use a heuristic to determine if in Elastic cloud.
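+ // (editor's note: there is no direct "running in Elastic Cloud" signal available to this
+ // action, so an operator-managed setting that Cloud deployments are expected to enable
+ // serves as a proxy.)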
+ // One such heuristic is where USE_AUTO_MACHINE_MEMORY_PERCENT == true + return settings.get(MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT); + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java index f8e8584a6a382..907c76e02f53c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; +import java.util.Set; import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTED; import static org.elasticsearch.xpack.inference.services.MapParsingUtils.removeFromMapOrThrowIfNull; @@ -35,46 +36,64 @@ public class ElserMlNodeService implements InferenceService { public static final String NAME = "elser_mlnode"; - private static final String ELSER_V1_MODEL = ".elser_model_1"; + static final String ELSER_V1_MODEL = ".elser_model_1"; + // Default non platform specific v2 model + static final String ELSER_V2_MODEL = ".elser_model_2"; + static final String ELSER_V2_MODEL_LINUX_X86 = ".elser_model_2_linux-x86_64"; - public static ElserMlNodeModel parseConfig( - boolean throwOnUnknownFields, + public static Set VALID_ELSER_MODELS = Set.of( + ElserMlNodeService.ELSER_V1_MODEL, + ElserMlNodeService.ELSER_V2_MODEL, + ElserMlNodeService.ELSER_V2_MODEL_LINUX_X86 + ); + + private final OriginSettingClient client; + + public ElserMlNodeService(InferenceServicePlugin.InferenceServiceFactoryContext context) { + this.client = new OriginSettingClient(context.client(), ClientHelper.INFERENCE_ORIGIN); + } + + public boolean isInClusterService() { + return true; + } + + @Override + public ElserMlNodeModel parseRequestConfig( String modelId, TaskType taskType, - Map settings, - Map secrets + Map config, + Set modelArchitectures ) { - Map serviceSettingsMap = removeFromMapOrThrowIfNull(settings, ModelConfigurations.SERVICE_SETTINGS); - var serviceSettings = serviceSettingsFromMap(serviceSettingsMap); + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + var serviceSettingsBuilder = ElserMlNodeServiceSettings.fromMap(serviceSettingsMap); + + // choose a default model version based on the cluster architecture + if (serviceSettingsBuilder.getModelVariant() == null) { + boolean homogenous = modelArchitectures.size() == 1; + if (homogenous && modelArchitectures.iterator().next().equals("linux-x86_64")) { + // Use the hardware optimized model + serviceSettingsBuilder.setModelVariant(ELSER_V2_MODEL_LINUX_X86); + } else { + // default to the platform-agnostic model + serviceSettingsBuilder.setModelVariant(ELSER_V2_MODEL); + } + } Map taskSettingsMap; // task settings are optional - if (settings.containsKey(ModelConfigurations.TASK_SETTINGS)) { - taskSettingsMap = removeFromMapOrThrowIfNull(settings, ModelConfigurations.TASK_SETTINGS); + if (config.containsKey(ModelConfigurations.TASK_SETTINGS)) { + taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); } else { taskSettingsMap = Map.of(); } var taskSettings = taskSettingsFromMap(taskType, taskSettingsMap); - if (throwOnUnknownFields) { - throwIfNotEmptyMap(settings, NAME); - throwIfNotEmptyMap(serviceSettingsMap, NAME); - 
throwIfNotEmptyMap(taskSettingsMap, NAME); - } + throwIfNotEmptyMap(config, NAME); + throwIfNotEmptyMap(serviceSettingsMap, NAME); + throwIfNotEmptyMap(taskSettingsMap, NAME); - return new ElserMlNodeModel(modelId, taskType, NAME, serviceSettings, taskSettings); - } - - private final OriginSettingClient client; - - public ElserMlNodeService(InferenceServicePlugin.InferenceServiceFactoryContext context) { - this.client = new OriginSettingClient(context.client(), ClientHelper.INFERENCE_ORIGIN); - } - - @Override - public ElserMlNodeModel parseRequestConfig(String modelId, TaskType taskType, Map config) { - return parseConfig(true, modelId, taskType, config, config); + return new ElserMlNodeModel(modelId, taskType, NAME, serviceSettingsBuilder.build(), taskSettings); } @Override @@ -84,7 +103,20 @@ public ElserMlNodeModel parsePersistedConfig( Map config, Map secrets ) { - return parseConfig(false, modelId, taskType, config, secrets); + Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + var serviceSettingsBuilder = ElserMlNodeServiceSettings.fromMap(serviceSettingsMap); + + Map taskSettingsMap; + // task settings are optional + if (config.containsKey(ModelConfigurations.TASK_SETTINGS)) { + taskSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.TASK_SETTINGS); + } else { + taskSettingsMap = Map.of(); + } + + var taskSettings = taskSettingsFromMap(taskType, taskSettingsMap); + + return new ElserMlNodeModel(modelId, taskType, NAME, serviceSettingsBuilder.build(), taskSettings); } @Override @@ -106,7 +138,10 @@ public void start(Model model, ActionListener listener) { var elserModel = (ElserMlNodeModel) model; var serviceSettings = elserModel.getServiceSettings(); - var startRequest = new StartTrainedModelDeploymentAction.Request(ELSER_V1_MODEL, model.getConfigurations().getModelId()); + var startRequest = new StartTrainedModelDeploymentAction.Request( + serviceSettings.getModelVariant(), + model.getConfigurations().getModelId() + ); startRequest.setNumberOfAllocations(serviceSettings.getNumAllocations()); startRequest.setThreadsPerAllocation(serviceSettings.getNumThreads()); startRequest.setWaitForState(STARTED); @@ -144,10 +179,6 @@ public void infer(Model model, String input, Map taskSettings, A }, listener::onFailure)); } - private static ElserMlNodeServiceSettings serviceSettingsFromMap(Map config) { - return ElserMlNodeServiceSettings.fromMap(config); - } - private static ElserMlNodeTaskSettings taskSettingsFromMap(TaskType taskType, Map config) { if (taskType != TaskType.SPARSE_EMBEDDING) { throw new ElasticsearchStatusException(TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), RestStatus.BAD_REQUEST); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java index 42cb491c76204..7dffbc693ca51 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java @@ -26,9 +26,11 @@ public class ElserMlNodeServiceSettings implements ServiceSettings { public static final String NAME = "elser_mlnode_service_settings"; public static final String NUM_ALLOCATIONS = "num_allocations"; public static final String NUM_THREADS = "num_threads"; + public 
static final String MODEL_VERSION = "model_version"; private final int numAllocations; private final int numThreads; + private final String modelVariant; /** * Parse the Elser service setting from map and validate the setting values. @@ -39,9 +41,8 @@ public class ElserMlNodeServiceSettings implements ServiceSettings { * @param map Source map containing the config * @return The {@code ElserMlNodeServiceSettings} */ - public static ElserMlNodeServiceSettings fromMap(Map<String, Object> map) { + public static ElserMlNodeServiceSettings.Builder fromMap(Map<String, Object> map) { ValidationException validationException = new ValidationException(); - Integer numAllocations = MapParsingUtils.removeAsType(map, NUM_ALLOCATIONS, Integer.class); Integer numThreads = MapParsingUtils.removeAsType(map, NUM_THREADS, Integer.class); @@ -61,21 +62,36 @@ public static ElserMlNodeServiceSettings fromMap(Map<String, Object> map) { validationException.addValidationError(mustBeAPositiveNumberError(NUM_THREADS, numThreads)); } + String version = MapParsingUtils.removeAsType(map, MODEL_VERSION, String.class); + if (version != null && ElserMlNodeService.VALID_ELSER_MODELS.contains(version) == false) { + validationException.addValidationError("unknown ELSER model version [" + version + "]"); + } + if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new ElserMlNodeServiceSettings(numAllocations, numThreads); + var builder = new Builder(); + builder.setNumAllocations(numAllocations); + builder.setNumThreads(numThreads); + builder.setModelVariant(version); + return builder; } - public ElserMlNodeServiceSettings(int numAllocations, int numThreads) { + public ElserMlNodeServiceSettings(int numAllocations, int numThreads, String variant) { this.numAllocations = numAllocations; this.numThreads = numThreads; + this.modelVariant = Objects.requireNonNull(variant); } public ElserMlNodeServiceSettings(StreamInput in) throws IOException { numAllocations = in.readVInt(); numThreads = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED)) { + modelVariant = in.readString(); + } else { + modelVariant = ElserMlNodeService.ELSER_V1_MODEL; + } } public int getNumAllocations() { @@ -86,11 +102,16 @@ public int getNumThreads() { return numThreads; } + public String getModelVariant() { + return modelVariant; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(NUM_ALLOCATIONS, numAllocations); builder.field(NUM_THREADS, numThreads); + builder.field(MODEL_VERSION, modelVariant); builder.endObject(); return builder; } @@ -109,11 +130,14 @@ public TransportVersion getMinimalSupportedVersion() { public void writeTo(StreamOutput out) throws IOException { out.writeVInt(numAllocations); out.writeVInt(numThreads); + if (out.getTransportVersion().onOrAfter(TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED)) { + out.writeString(modelVariant); + } } @Override public int hashCode() { - return Objects.hash(numAllocations, numThreads); + return Objects.hash(numAllocations, numThreads, modelVariant); } @Override @@ -121,10 +145,36 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ElserMlNodeServiceSettings that = (ElserMlNodeServiceSettings) o; - return numAllocations == that.numAllocations && numThreads == that.numThreads; + return numAllocations == that.numAllocations && numThreads == that.numThreads &&
Objects.equals(modelVariant, that.modelVariant); } private static String mustBeAPositiveNumberError(String settingName, int value) { return "Invalid value [" + value + "]. [" + settingName + "] must be a positive integer"; } + + public static class Builder { + private int numAllocations; + private int numThreads; + private String modelVariant; + + public void setNumAllocations(int numAllocations) { + this.numAllocations = numAllocations; + } + + public void setNumThreads(int numThreads) { + this.numThreads = numThreads; + } + + public void setModelVariant(String modelVariant) { + this.modelVariant = modelVariant; + } + + public String getModelVariant() { + return modelVariant; + } + + public ElserMlNodeServiceSettings build() { + return new ElserMlNodeServiceSettings(numAllocations, numThreads, modelVariant); + } + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java index 5ffc2347b63e6..35d5c0b8e9603 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -19,14 +20,53 @@ public class ElserMlNodeServiceSettingsTests extends AbstractWireSerializingTestCase { public static ElserMlNodeServiceSettings createRandom() { - return new ElserMlNodeServiceSettings(randomIntBetween(1, 4), randomIntBetween(1, 2)); + return new ElserMlNodeServiceSettings( + randomIntBetween(1, 4), + randomIntBetween(1, 2), + randomFrom(ElserMlNodeService.VALID_ELSER_MODELS) + ); + } + + public void testFromMap_DefaultModelVersion() { + var serviceSettingsBuilder = ElserMlNodeServiceSettings.fromMap( + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + assertNull(serviceSettingsBuilder.getModelVariant()); } public void testFromMap() { var serviceSettings = ElserMlNodeServiceSettings.fromMap( - new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + new HashMap<>( + Map.of( + ElserMlNodeServiceSettings.NUM_ALLOCATIONS, + 1, + ElserMlNodeServiceSettings.NUM_THREADS, + 4, + ElserMlNodeServiceSettings.MODEL_VERSION, + ".elser_model_1" + ) + ) + ).build(); + assertEquals(new ElserMlNodeServiceSettings(1, 4, ".elser_model_1"), serviceSettings); + } + + public void testFromMapInvalidVersion() { + var e = expectThrows( + ValidationException.class, + () -> ElserMlNodeServiceSettings.fromMap( + new HashMap<>( + Map.of( + ElserMlNodeServiceSettings.NUM_ALLOCATIONS, + 1, + ElserMlNodeServiceSettings.NUM_THREADS, + 4, + "model_version", + ".elser_model_27" + ) + ) + ) ); - assertEquals(new ElserMlNodeServiceSettings(1, 4), serviceSettings); + assertThat(e.getMessage(), containsString("unknown ELSER model version [.elser_model_27]")); } public void testFromMapMissingOptions() { @@ -67,9 +107,22 @@ protected ElserMlNodeServiceSettings createTestInstance() { @Override protected ElserMlNodeServiceSettings mutateInstance(ElserMlNodeServiceSettings instance) { - return switch 
(randomIntBetween(0, 1)) { - case 0 -> new ElserMlNodeServiceSettings(instance.getNumAllocations() + 1, instance.getNumThreads()); - case 1 -> new ElserMlNodeServiceSettings(instance.getNumAllocations(), instance.getNumThreads() + 1); + return switch (randomIntBetween(0, 2)) { + case 0 -> new ElserMlNodeServiceSettings( + instance.getNumAllocations() + 1, + instance.getNumThreads(), + instance.getModelVariant() + ); + case 1 -> new ElserMlNodeServiceSettings( + instance.getNumAllocations(), + instance.getNumThreads() + 1, + instance.getModelVariant() + ); + case 2 -> { + var versions = new HashSet<>(ElserMlNodeService.VALID_ELSER_MODELS); + versions.remove(instance.getModelVariant()); + yield new ElserMlNodeServiceSettings(instance.getNumAllocations(), instance.getNumThreads(), versions.iterator().next()); + } default -> throw new IllegalStateException(); }; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java index 1ab580eec358b..6348e1d7d4f98 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java @@ -18,6 +18,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.mock; @@ -43,18 +44,27 @@ public void testParseConfigStrict() { var settings = new HashMap(); settings.put( ModelConfigurations.SERVICE_SETTINGS, - new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + new HashMap<>( + Map.of( + ElserMlNodeServiceSettings.NUM_ALLOCATIONS, + 1, + ElserMlNodeServiceSettings.NUM_THREADS, + 4, + "model_version", + ".elser_model_1" + ) + ) ); settings.put(ModelConfigurations.TASK_SETTINGS, Map.of()); - ElserMlNodeModel parsedModel = service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings); + ElserMlNodeModel parsedModel = service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Set.of()); assertEquals( new ElserMlNodeModel( "foo", TaskType.SPARSE_EMBEDDING, ElserMlNodeService.NAME, - new ElserMlNodeServiceSettings(1, 4), + new ElserMlNodeServiceSettings(1, 4, ".elser_model_1"), ElserMlNodeTaskSettings.DEFAULT ), parsedModel @@ -70,14 +80,14 @@ public void testParseConfigStrictWithNoTaskSettings() { new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) ); - ElserMlNodeModel parsedModel = service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings); + ElserMlNodeModel parsedModel = service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Set.of()); assertEquals( new ElserMlNodeModel( "foo", TaskType.SPARSE_EMBEDDING, ElserMlNodeService.NAME, - new ElserMlNodeServiceSettings(1, 4), + new ElserMlNodeServiceSettings(1, 4, ElserMlNodeService.ELSER_V2_MODEL), ElserMlNodeTaskSettings.DEFAULT ), parsedModel @@ -86,12 +96,23 @@ public void testParseConfigStrictWithNoTaskSettings() { public void testParseConfigStrictWithUnknownSettings() { + var service = createService(mock(Client.class)); + for (boolean throwOnUnknown : new boolean[] { true, false }) { { var settings = new HashMap(); settings.put( 
ModelConfigurations.SERVICE_SETTINGS, - new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + new HashMap<>( + Map.of( + ElserMlNodeServiceSettings.NUM_ALLOCATIONS, + 1, + ElserMlNodeServiceSettings.NUM_THREADS, + 4, + ElserMlNodeServiceSettings.MODEL_VERSION, + ".elser_model_2" + ) + ) ); settings.put(ModelConfigurations.TASK_SETTINGS, Map.of()); settings.put("foo", "bar"); @@ -99,26 +120,14 @@ public void testParseConfigStrictWithUnknownSettings() { if (throwOnUnknown) { var e = expectThrows( ElasticsearchStatusException.class, - () -> ElserMlNodeService.parseConfig( - throwOnUnknown, - "foo", - TaskType.SPARSE_EMBEDDING, - settings, - Collections.emptyMap() - ) + () -> service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Set.of()) ); assertThat( e.getMessage(), containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") ); } else { - var parsed = ElserMlNodeService.parseConfig( - throwOnUnknown, - "foo", - TaskType.SPARSE_EMBEDDING, - settings, - Collections.emptyMap() - ); + var parsed = service.parsePersistedConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Collections.emptyMap()); } } @@ -126,33 +135,30 @@ public void testParseConfigStrictWithUnknownSettings() { var settings = new HashMap(); settings.put( ModelConfigurations.SERVICE_SETTINGS, - new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + new HashMap<>( + Map.of( + ElserMlNodeServiceSettings.NUM_ALLOCATIONS, + 1, + ElserMlNodeServiceSettings.NUM_THREADS, + 4, + ElserMlNodeServiceSettings.MODEL_VERSION, + ".elser_model_2" + ) + ) ); settings.put(ModelConfigurations.TASK_SETTINGS, Map.of("foo", "bar")); if (throwOnUnknown) { var e = expectThrows( ElasticsearchStatusException.class, - () -> ElserMlNodeService.parseConfig( - throwOnUnknown, - "foo", - TaskType.SPARSE_EMBEDDING, - settings, - Collections.emptyMap() - ) + () -> service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Set.of()) ); assertThat( e.getMessage(), containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") ); } else { - var parsed = ElserMlNodeService.parseConfig( - throwOnUnknown, - "foo", - TaskType.SPARSE_EMBEDDING, - settings, - Collections.emptyMap() - ); + var parsed = service.parsePersistedConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Collections.emptyMap()); } } @@ -161,38 +167,61 @@ public void testParseConfigStrictWithUnknownSettings() { settings.put( ModelConfigurations.SERVICE_SETTINGS, new HashMap<>( - Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4, "foo", "bar") + Map.of( + ElserMlNodeServiceSettings.NUM_ALLOCATIONS, + 1, + ElserMlNodeServiceSettings.NUM_THREADS, + 4, + ElserMlNodeServiceSettings.MODEL_VERSION, + ".elser_model_2", + "foo", + "bar" + ) ) ); if (throwOnUnknown) { var e = expectThrows( ElasticsearchStatusException.class, - () -> ElserMlNodeService.parseConfig( - throwOnUnknown, - "foo", - TaskType.SPARSE_EMBEDDING, - settings, - Collections.emptyMap() - ) + () -> service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Set.of()) ); assertThat( e.getMessage(), containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") ); } else { - var parsed = ElserMlNodeService.parseConfig( - throwOnUnknown, - "foo", - TaskType.SPARSE_EMBEDDING, - settings, - Collections.emptyMap() - ); + 
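// Editor's note: the lenient path; configs read back from storage may carry fields written by other versions, so parsePersistedConfig ignores settings the strict request parser would reject. +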
var parsed = service.parsePersistedConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Collections.emptyMap()); } } } } + public void testParseRequestConfig_DefaultModel() { + var service = createService(mock(Client.class)); + { + var settings = new HashMap(); + settings.put( + ModelConfigurations.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + + ElserMlNodeModel parsedModel = service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Set.of()); + + assertEquals(".elser_model_2", parsedModel.getServiceSettings().getModelVariant()); + } + { + var settings = new HashMap(); + settings.put( + ModelConfigurations.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + + ElserMlNodeModel parsedModel = service.parseRequestConfig("foo", TaskType.SPARSE_EMBEDDING, settings, Set.of("linux-x86_64")); + + assertEquals(".elser_model_2_linux-x86_64", parsedModel.getServiceSettings().getModelVariant()); + } + } + private ElserMlNodeService createService(Client client) { var context = new InferenceServicePlugin.InferenceServiceFactoryContext(client); return new ElserMlNodeService(context); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 323f2661d1098..a7bbbbcffb8b3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -624,6 +624,7 @@ public void loadExtensions(ExtensionLoader loader) { // Settings higher than available memory are only recommended for OEM type situations where a wrapper tightly // controls the types of jobs that can be created, and each job alone is considerably smaller than what each node // can handle. + // See also {@link MachineLearningField#USE_AUTO_MACHINE_MEMORY_PERCENT} public static final Setting MAX_MACHINE_MEMORY_PERCENT = Setting.intSetting( "xpack.ml.max_machine_memory_percent", 30, @@ -632,19 +633,7 @@ public void loadExtensions(ExtensionLoader loader) { Property.OperatorDynamic, Property.NodeScope ); - /** - * This boolean value indicates if `max_machine_memory_percent` should be ignored and a automatic calculation is used instead. - * - * This calculation takes into account total node size and the size of the JVM on that node. - * - * If the calculation fails, we fall back to `max_machine_memory_percent`. 
- */ - public static final Setting USE_AUTO_MACHINE_MEMORY_PERCENT = Setting.boolSetting( - "xpack.ml.use_auto_machine_memory_percent", - false, - Property.OperatorDynamic, - Property.NodeScope - ); + public static final Setting MAX_LAZY_ML_NODES = Setting.intSetting( "xpack.ml.max_lazy_ml_nodes", 0, @@ -798,7 +787,7 @@ public List> getSettings() { ModelLoadingService.INFERENCE_MODEL_CACHE_TTL, ResultsPersisterService.PERSIST_RESULTS_MAX_RETRIES, NIGHTLY_MAINTENANCE_REQUESTS_PER_SECOND, - USE_AUTO_MACHINE_MEMORY_PERCENT, + MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT, MAX_ML_NODE_SIZE, DELAYED_DATA_CHECK_FREQ ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java index ff468a9fcac83..0265faaeeb1d6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java @@ -46,9 +46,9 @@ import java.util.Optional; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_OPEN_JOBS_PER_NODE; -import static org.elasticsearch.xpack.ml.MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT; public class TransportMlMemoryAction extends TransportMasterNodeAction { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java index 93f34a840bdf7..708c8ee285896 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java @@ -70,9 +70,10 @@ import org.elasticsearch.xpack.core.ml.packageloader.action.GetTrainedModelPackageConfigAction; import org.elasticsearch.xpack.core.ml.packageloader.action.LoadTrainedModelPackageAction; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; +import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; -import org.elasticsearch.xpack.ml.inference.deployment.MlPlatformArchitecturesUtil; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.utils.TaskRetriever; @@ -383,7 +384,12 @@ void callVerifyMlNodesAndModelArchitectures( Client client, ThreadPool threadPool ) { - MlPlatformArchitecturesUtil.verifyMlNodesAndModelArchitectures(failureListener, client, threadPool, configToReturn); + MlPlatformArchitecturesUtil.verifyMlNodesAndModelArchitectures( + failureListener, + client, + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME), + configToReturn + ); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java index 0ebea7956a661..4ff7e66d296d0 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java @@ -21,6 +21,7 @@ import org.elasticsearch.logging.Logger; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderContext; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; @@ -88,11 +89,12 @@ class MlMemoryAutoscalingDecider { this.maxMachineMemoryPercent = MAX_MACHINE_MEMORY_PERCENT.get(settings); this.maxOpenJobs = MAX_OPEN_JOBS_PER_NODE.get(settings); - this.useAuto = MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT.get(settings); + this.useAuto = MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT.get(settings); setMaxMlNodeSize(MachineLearning.MAX_ML_NODE_SIZE.get(settings)); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_MACHINE_MEMORY_PERCENT, this::setMaxMachineMemoryPercent); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_JOBS_PER_NODE, this::setMaxOpenJobs); - clusterService.getClusterSettings().addSettingsUpdateConsumer(MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT, this::setUseAuto); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT, this::setUseAuto); clusterService.getClusterSettings().addSettingsUpdateConsumer(MachineLearning.MAX_ML_NODE_SIZE, this::setMaxMlNodeSize); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index ea52c4918d05b..7ecb580d5feac 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -35,6 +35,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; @@ -45,10 +46,10 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.autoscaling.NodeAvailabilityZoneMapper; import org.elasticsearch.xpack.ml.inference.assignment.planning.AllocationReducer; -import org.elasticsearch.xpack.ml.inference.deployment.MlPlatformArchitecturesUtil; import org.elasticsearch.xpack.ml.job.NodeLoad; import org.elasticsearch.xpack.ml.job.NodeLoadDetector; import org.elasticsearch.xpack.ml.notifications.SystemAuditor; @@ -103,7 +104,7 @@ public TrainedModelAssignmentClusterService( this.systemAuditor = 
Objects.requireNonNull(systemAuditor); this.nodeAvailabilityZoneMapper = Objects.requireNonNull(nodeAvailabilityZoneMapper); this.maxMemoryPercentage = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings); - this.useAuto = MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT.get(settings); + this.useAuto = MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT.get(settings); this.maxOpenJobs = MachineLearning.MAX_OPEN_JOBS_PER_NODE.get(settings); this.maxLazyMLNodes = MachineLearning.MAX_LAZY_ML_NODES.get(settings); this.maxMLNodeSize = MachineLearning.MAX_ML_NODE_SIZE.get(settings).getBytes(); @@ -115,7 +116,7 @@ public TrainedModelAssignmentClusterService( clusterService.getClusterSettings() .addSettingsUpdateConsumer(MachineLearning.MAX_MACHINE_MEMORY_PERCENT, this::setMaxMemoryPercentage); clusterService.getClusterSettings() - .addSettingsUpdateConsumer(MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT, this::setUseAuto); + .addSettingsUpdateConsumer(MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT, this::setUseAuto); clusterService.getClusterSettings().addSettingsUpdateConsumer(MachineLearning.MAX_OPEN_JOBS_PER_NODE, this::setMaxOpenJobs); clusterService.getClusterSettings().addSettingsUpdateConsumer(MachineLearning.MAX_LAZY_ML_NODES, this::setMaxLazyMLNodes); clusterService.getClusterSettings().addSettingsUpdateConsumer(MachineLearning.MAX_ML_NODE_SIZE, this::setMaxMLNodeSize); @@ -206,7 +207,11 @@ boolean eventStateHasGlobalBlockStateNotRecoveredBlock(ClusterChangedEvent event void logMlNodeHeterogeneity() { ActionListener> architecturesListener = getArchitecturesSetActionListener(); - MlPlatformArchitecturesUtil.getMlNodesArchitecturesSet(architecturesListener, client, threadPool); + MlPlatformArchitecturesUtil.getMlNodesArchitecturesSet( + architecturesListener, + client, + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) + ); } static ActionListener> getArchitecturesSetActionListener() { @@ -590,7 +595,11 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) }); }, listener::onFailure); - MlPlatformArchitecturesUtil.getMlNodesArchitecturesSet(architecturesListener, client, threadPool); + MlPlatformArchitecturesUtil.getMlNodesArchitecturesSet( + architecturesListener, + client, + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) + ); } ClusterState stopPlatformSpecificModelsInHeterogeneousClusters( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index fcb44d0f391fe..a8c449ec5d8e7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.VocabularyConfig; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.inference.nlp.NlpTask; import org.elasticsearch.xpack.ml.inference.nlp.Vocabulary; @@ -255,7 +256,12 @@ void callVerifyMlNodesAndModelArchitectures( Client client, ThreadPool threadPool ) { - MlPlatformArchitecturesUtil.verifyMlNodesAndModelArchitectures(configToReturnListener, client, 
threadPool, configToReturn); + MlPlatformArchitecturesUtil.verifyMlNodesAndModelArchitectures( + configToReturnListener, + client, + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME), + configToReturn + ); } private SearchRequest vocabSearchRequest(VocabularyConfig vocabularyConfig, String modelId) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java index d205fa8fe97b8..a9f55ee5f5960 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java @@ -36,12 +36,12 @@ import java.util.Objects; import java.util.Optional; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; import static org.elasticsearch.xpack.core.ml.MlTasks.RESET_IN_PROGRESS; import static org.elasticsearch.xpack.core.ml.job.messages.Messages.JOB_AUDIT_REQUIRES_MORE_MEMORY_TO_RUN; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_ML_NODE_SIZE; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_OPEN_JOBS_PER_NODE; -import static org.elasticsearch.xpack.ml.MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; public abstract class AbstractJobPersistentTasksExecutor extends PersistentTasksExecutor { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculator.java index 4920e94ee1f08..020f1aae29427 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculator.java @@ -22,12 +22,12 @@ import java.util.OptionalLong; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.MachineLearning.MACHINE_MEMORY_NODE_ATTR; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_JVM_SIZE_NODE_ATTR; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_LAZY_ML_NODES; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_ML_NODE_SIZE; -import static org.elasticsearch.xpack.ml.MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT; public final class NativeMemoryCalculator { @@ -291,7 +291,7 @@ public static ByteSizeValue getMaxModelMemoryLimit(ClusterService clusterService } // When the ML memory percent is being set automatically and no explicit max model memory limit is set, // max model memory limit is considered to be the max model memory limit that will fit in the cluster - Boolean autoMemory = clusterSettings.get(MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT); + Boolean autoMemory = clusterSettings.get(MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT); if (autoMemory) { DiscoveryNodes nodes = clusterService.state().getNodes(); ByteSizeValue modelMemoryLimitToFit = calculateMaxModelMemoryLimitToFit(clusterSettings, nodes); diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java index 5b44f9f95e980..7f7469192b877 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction.TaskParams; @@ -126,7 +127,7 @@ private static TaskExecutor createTaskExecutor() { Sets.newHashSet( MachineLearning.CONCURRENT_JOB_ALLOCATIONS, MachineLearning.MAX_MACHINE_MEMORY_PERCENT, - MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT, + MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT, MachineLearning.MAX_ML_NODE_SIZE, MachineLearning.MAX_LAZY_ML_NODES, MachineLearning.MAX_OPEN_JOBS_PER_NODE diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java index ee9389a251ede..63db389f25487 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingCapacity; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderContext; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderResult; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.NodeLoad; @@ -130,7 +131,7 @@ public void setup() { Set.of( MachineLearning.MAX_MACHINE_MEMORY_PERCENT, MachineLearning.MAX_OPEN_JOBS_PER_NODE, - MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT, + MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT, MachineLearning.MAX_ML_NODE_SIZE, MachineLearning.ALLOCATED_PROCESSORS_SCALE, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java index cf986c3cc5709..00a9f8cb30110 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingCapacity; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderContext; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlConfigVersion; 
import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -160,7 +161,7 @@ public void setup() { Set.of( MachineLearning.MAX_MACHINE_MEMORY_PERCENT, MachineLearning.MAX_OPEN_JOBS_PER_NODE, - MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT, + MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT, MachineLearning.MAX_ML_NODE_SIZE, AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING ) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index 9e9266c8302e8..b138d46ac903a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -120,7 +121,7 @@ public void setupObjects() throws IllegalAccessException { Settings.EMPTY, Sets.newHashSet( MachineLearning.MAX_MACHINE_MEMORY_PERCENT, - MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT, + MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT, MachineLearning.MAX_OPEN_JOBS_PER_NODE, MachineLearning.MAX_LAZY_ML_NODES, MachineLearning.MAX_ML_NODE_SIZE, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java index 1c01072dcf519..57af1701fbb37 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlConfigIndex; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -103,7 +104,7 @@ public void setUpMocks() { MachineLearning.MAX_LAZY_ML_NODES, MachineLearning.MAX_ML_NODE_SIZE, MachineLearning.MAX_OPEN_JOBS_PER_NODE, - MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT + MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT ) ) ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculatorTests.java index e6464ba5c4e12..7f7c22594abb8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/NativeMemoryCalculatorTests.java @@ -37,12 +37,12 @@ import java.util.function.BiConsumer; import static org.elasticsearch.xpack.core.ml.MachineLearningField.MAX_MODEL_MEMORY_LIMIT; +import static 
org.elasticsearch.xpack.core.ml.MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.MachineLearning.MACHINE_MEMORY_NODE_ATTR; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_JVM_SIZE_NODE_ATTR; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_LAZY_ML_NODES; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.MachineLearning.MAX_ML_NODE_SIZE; -import static org.elasticsearch.xpack.ml.MachineLearning.USE_AUTO_MACHINE_MEMORY_PERCENT; import static org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingDeciderServiceTests.AUTO_NODE_TIERS_NO_MONITORING; import static org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingDeciderServiceTests.AUTO_NODE_TIERS_WITH_MONITORING; import static org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator.MINIMUM_AUTOMATIC_NODE_SIZE; From 2712dc5da5f67a2810e309d6d0a2e125178c3a62 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 10:25:39 +0100 Subject: [PATCH 102/176] Add known-issue docs for snapshot downgrades bug (#100412) --- docs/reference/release-notes/8.10.0.asciidoc | 30 ++++++++++++++++++++ docs/reference/release-notes/8.10.1.asciidoc | 6 ++++ docs/reference/release-notes/8.10.2.asciidoc | 6 +++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/docs/reference/release-notes/8.10.0.asciidoc b/docs/reference/release-notes/8.10.0.asciidoc index ea0af6c485f25..b8a57c702b8e7 100644 --- a/docs/reference/release-notes/8.10.0.asciidoc +++ b/docs/reference/release-notes/8.10.0.asciidoc @@ -3,6 +3,36 @@ Also see <>. +[[known-issues-8.10.0]] +[float] +=== Known issues + +// tag::repositorydata-format-change[] +* Snapshot-based downgrades ++ +The snapshot repository format changed in a manner that prevents earlier +versions of Elasticsearch from reading the repository contents if it contains +snapshots from this version and the last cluster to write to this repository +was in the 8.10 series. This will prevent you from reverting an upgrade to the +8.10 series by restoring a snapshot taken before the upgrade. ++ +Snapshot repositories written by clusters running versions 8.11.0 and later are +compatible with all earlier versions. Moreover, clusters running version 8.11.0 +or later will also automatically repair the repository format the first time +they write to the repository to take or delete a snapshot, making it so that +all earlier versions can read its contents again. ++ +If you wish to downgrade to a version prior to 8.9.0, take or delete a snapshot +using a cluster running version 8.11.0 or later to repair the repository format +first. If you cannot repair the repository in this way, first delete all the +snapshots in the repository taken with version 8.9.0 or later. To do this will +require using a cluster running version 8.10.0 or later. ++ +If you wish to downgrade to a version in the 8.9 series, you must take or +delete a snapshot using a cluster running version 8.11.0 or later to repair the +repository format first. +// end::repositorydata-format-change[] + [[breaking-8.10.0]] [float] === Breaking changes diff --git a/docs/reference/release-notes/8.10.1.asciidoc b/docs/reference/release-notes/8.10.1.asciidoc index de890ccadd772..d049d5b33b1f7 100644 --- a/docs/reference/release-notes/8.10.1.asciidoc +++ b/docs/reference/release-notes/8.10.1.asciidoc @@ -3,6 +3,12 @@ Also see <>. 
+[[known-issues-8.10.1]] +[float] +=== Known issues + +include::8.10.0.asciidoc[tag=repositorydata-format-change] + [[bug-8.10.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.10.2.asciidoc b/docs/reference/release-notes/8.10.2.asciidoc index 248f07530b8a9..c428b4534fe79 100644 --- a/docs/reference/release-notes/8.10.2.asciidoc +++ b/docs/reference/release-notes/8.10.2.asciidoc @@ -1,6 +1,10 @@ [[release-notes-8.10.2]] == {es} version 8.10.2 -8.10.2 contains no significant changes. +[[known-issues-8.10.2]] +[float] +=== Known issues + +include::8.10.0.asciidoc[tag=repositorydata-format-change] Also see <>. From b286fb35408247b66e2b2dbac56bec998614a797 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Tue, 10 Oct 2023 10:38:39 +0100 Subject: [PATCH 103/176] MatchingDirectoryReader should not use a threaded searcher (#100527) MatchingDirectoryReader is a test reader wrapper that filters out documents matching a particular query. For each leaf, we create an IndexSearcher, execute the query against it and then use that as a filter for the leaf. This searcher is created using LuceneTestCase.newSearcher() and as such may be multi- threaded, which triggers extra index checks. For tests that are expecting certain methods to be called against internal readers a given number of times, these extra checks can add additional calls which then lead to a failure of test assumptions. Because this IndexSearcher is only executed against a single leaf it will only ever use a single thread, and so we can explicitly disable threading here. Fixes #100487 Fixes #99916 --- .../org/elasticsearch/index/engine/InternalEngineTests.java | 1 - .../org/elasticsearch/index/shard/ShardGetServiceTests.java | 1 - .../java/org/elasticsearch/index/engine/EngineTestCase.java | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 5cec0c889d414..aed83cf8abd95 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1039,7 +1039,6 @@ public void testSimpleOperations() throws Exception { searchResult.close(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99916") public void testGetWithSearcherWrapper() throws Exception { engine.refresh("warm_up"); engine.index(indexForDoc(createParsedDoc("1", null))); diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 48f86b2ad82a4..f099fa657b89c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -95,7 +95,6 @@ public void testGetFromTranslogWithStringSourceMappingOptionsAndStoredFields() t runGetFromTranslogWithOptions(docToIndex, sourceOptions, noSource ? 
"" : "{\"bar\":\"bar\"}", "\"text\"", "foo", false); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100487") public void testGetFromTranslogWithLongSourceMappingOptionsAndStoredFields() throws IOException { String docToIndex = """ {"foo" : 7, "bar" : 42} diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index bcb42f519a290..17f2303eb84c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -1509,7 +1509,7 @@ public MatchingDirectoryReader(DirectoryReader in, Query query) throws IOExcepti @Override public LeafReader wrap(LeafReader leaf) { try { - final IndexSearcher searcher = newSearcher(leaf, false); + final IndexSearcher searcher = newSearcher(leaf, false, true, false); searcher.setQueryCache(null); final Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1.0f); final Scorer scorer = weight.scorer(leaf.getContext()); From ea0d707afb550e6449414a95d0b2936d8eaaeabf Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 10:48:24 +0100 Subject: [PATCH 104/176] AwaitsFix for #100580 --- .../test/java/org/elasticsearch/index/mapper/IdLoaderTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java index e67873fe4c761..b22d4269c7891 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java @@ -69,6 +69,7 @@ public void testSynthesizeIdSimple() throws Exception { prepareIndexReader(indexAndForceMerge(routing, docs), verify, false); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100580") public void testSynthesizeIdMultipleSegments() throws Exception { var routingPaths = List.of("dim1"); var routing = createRouting(routingPaths); From 4e6e539072a64bdb805cd62d48368ad5da8fe98a Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 11:11:00 +0100 Subject: [PATCH 105/176] AwaitsFix for #96578 --- .../elasticsearch/action/admin/indices/create/CreateIndexIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 6375c1b8d45f5..27154c883d270 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -206,6 +206,7 @@ public void testInvalidShardCountSettingsWithoutPrefix() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96578") public void testCreateAndDeleteIndexConcurrently() throws InterruptedException { createIndex("test"); final AtomicInteger indexVersion = new AtomicInteger(0); From 007a3df40758086a8839596f75b2403fa101e0c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 10 Oct 2023 12:24:31 +0200 Subject: [PATCH 106/176] Unmuting UpgradeClusterClientYamlTestSuiteIT for risk assesment (#100579) --- .../resources/rest-api-spec/test/upgraded_cluster/10_basic.yml | 3 
--- 1 file changed, 3 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index cab9f19363d48..d4aec6ac1f0ab 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -1,8 +1,5 @@ --- "Continue scroll after upgrade": - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/91637" - do: get: index: scroll_index From 8e5445b78e3041d038807bfc298ba26eca095d1c Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 11:33:41 +0100 Subject: [PATCH 107/176] AwaitsFix for #100559 --- .../esql/expression/function/AbstractFunctionTestCase.java | 2 +- .../expression/function/scalar/nulls/CoalesceTests.java | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 3a6479215f479..cdff3f0b5f2ca 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -420,7 +420,7 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo assertTrue("argument " + nullBlock + " is null", value.isNull(0)); } - public final void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { + public void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { assumeTrue("nothing to do if a type error", testCase.getExpectedTypeError() == null); assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); int count = 10_000; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java index 8db6b1bbd0c93..15d37acbccfcb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.function.Function; import java.util.function.Supplier; @@ -53,6 +54,12 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(builder.suppliers()); } + @Override + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100559") + public void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { + super.testEvaluateInManyThreads(); + } + @Override protected void assertSimpleWithNulls(List data, Block value, int nullBlock) { for (int i = 0; i < data.size(); i++) { From 63c8233182c23dc0daa8644269ccbcc6fb23e000 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 11:59:52 +0100 Subject: [PATCH 108/176] AwaitsFix for #100586 --- 
.../org/elasticsearch/compute/data/BlockAccountingTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java index 05f77357b9184..bb1cd019273ed 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java @@ -148,6 +148,7 @@ public void testBooleanBlockWithNullFirstValues() { assertThat(empty.ramBytesUsed(), lessThanOrEqualTo(expectedEmptyUsed)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100586") public void testIntBlock() { Block empty = new IntArrayBlock(new int[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); From 4369e79ae4a332ec05808e9a06880c1441dc259e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 12:19:41 +0100 Subject: [PATCH 109/176] AwaitsFix for #99774 --- .../resources/rest-api-spec/test/aggregations/terms.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/terms.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/terms.yml index 7deee31920cc8..347cf5e8de952 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/terms.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/terms.yml @@ -696,6 +696,9 @@ setup: --- "Global ordinals are loaded with the global_ordinals execution hint": + - skip: + version: all + reason: AwaitsFix https://github.com/elastic/elasticsearch/issues/99774 - do: index: From e351c681929c36a52473cb348d2643ba421d77fc Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 10 Oct 2023 22:27:14 +1100 Subject: [PATCH 110/176] Refactor GatewayService (#99994) This PR refactors GatewayService with the goal to make it easier to add new features. Resolves: #89310 --- .../cluster/ClusterStateTaskExecutor.java | 4 +- .../elasticsearch/gateway/GatewayService.java | 197 ++++--- .../gateway/GatewayServiceTests.java | 492 ++++++++++++++++-- .../concurrent/DeterministicTaskQueue.java | 11 +- 4 files changed, 598 insertions(+), 106 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index d62dbdc2cc173..081f8150d8c8e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -38,8 +38,8 @@ public interface ClusterStateTaskExecutor { * already have become master and updated the state in a way that would be inconsistent with the response that {@code N} sends back to * clients. * - * @return The resulting cluster state after executing all the tasks. If {code batchExecutionContext.initialState()} is returned then no - * update is published. + * @return The resulting cluster state after executing all the tasks. If {@code batchExecutionContext.initialState()} is returned then + * no update is published. 
*/ ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index bb0ffe0ee1c8d..91280c4da40b6 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -31,9 +31,11 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import java.util.concurrent.atomic.AtomicBoolean; @@ -80,9 +82,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final TimeValue recoverAfterTime; private final int recoverAfterDataNodes; private final int expectedDataNodes; - - private final AtomicBoolean recoveryInProgress = new AtomicBoolean(); - private final AtomicBoolean scheduledRecovery = new AtomicBoolean(); + volatile PendingStateRecovery currentPendingStateRecovery; @Inject public GatewayService( @@ -131,8 +131,9 @@ public void clusterChanged(final ClusterChangedEvent event) { } final ClusterState state = event.state(); + final DiscoveryNodes nodes = state.nodes(); - if (state.nodes().isLocalNodeElectedMaster() == false) { + if (nodes.isLocalNodeElectedMaster() == false) { // not our job to recover return; } @@ -141,83 +142,153 @@ public void clusterChanged(final ClusterChangedEvent event) { return; } - final DiscoveryNodes nodes = state.nodes(); - if (state.nodes().getMasterNodeId() == null) { - logger.debug("not recovering from gateway, no master elected yet"); - } else if (recoverAfterDataNodes != -1 && nodes.getDataNodes().size() < recoverAfterDataNodes) { - logger.debug( - "not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]", - nodes.getDataNodes().size(), - recoverAfterDataNodes - ); - } else { - boolean enforceRecoverAfterTime; - String reason; - if (expectedDataNodes == -1) { - // no expected is set, honor recover_after_data_nodes - enforceRecoverAfterTime = true; - reason = "recover_after_time was set to [" + recoverAfterTime + "]"; - } else if (expectedDataNodes <= nodes.getDataNodes().size()) { - // expected is set and satisfied so recover immediately - enforceRecoverAfterTime = false; - reason = ""; + // At this point, we know the state is not recovered and this node is qualified for state recovery + // But we still need to check whether a previous one is running already + final long currentTerm = state.term(); + final PendingStateRecovery existingPendingStateRecovery = currentPendingStateRecovery; + + // Always start a new state recovery if the master term changes + // If there is a previous one still waiting, both will probably run but at most one of them will + // actually make changes to cluster state because either: + // 1. The previous recovers the cluster state and the current one will be skipped + // 2. 
The previous one sees a new cluster term and skips its own execution + if (existingPendingStateRecovery == null || existingPendingStateRecovery.expectedTerm < currentTerm) { + currentPendingStateRecovery = new PendingStateRecovery(currentTerm); + } + currentPendingStateRecovery.onDataNodeSize(nodes.getDataNodes().size()); + } + + /** + * This class manages the cluster state recovery behaviours. It has two major scenarios depending + * on whether {@code recoverAfterDataNodes} is configured. + * + *

When {@code recoverAfterDataNodes} is configured: + * 1. Nothing can happen until it is reached + * 2. When {@code recoverAfterDataNodes} is reached, the cluster either: + * • Recover immediately when {@code expectedDataNodes} is reached or + * both {@code expectedDataNodes} and {@code recoverAfterTime} are not configured + * • Or schedule a recovery with a delay of {@code recoverAfterTime} + * 3. The scheduled recovery can be cancelled if {@code recoverAfterDataNodes} drops below required number + * before the recovery can happen. When this happens, the process goes back to the beginning (step 1). + * 4. The recovery is scheduled only once each time {@code recoverAfterDataNodes} crosses the required number + * + * When {@code recoverAfterDataNodes} is Not configured, the cluster either: + * • Recover immediately when {@code expectedDataNodes} is reached or + * both {@code expectedDataNodes} and {@code recoverAfterTime} are not configured + *
+ */ + class PendingStateRecovery { + private final long expectedTerm; + @Nullable + private Scheduler.ScheduledCancellable scheduledRecovery; + private final AtomicBoolean taskSubmitted = new AtomicBoolean(); + + PendingStateRecovery(long expectedTerm) { + this.expectedTerm = expectedTerm; + } + + void onDataNodeSize(int currentDataNodeSize) { + if (recoverAfterDataNodes != -1 && currentDataNodeSize < recoverAfterDataNodes) { + logger.debug( + "not recovering from gateway, nodes_size (data) [{}] < recover_after_data_nodes [{}]", + currentDataNodeSize, + recoverAfterDataNodes + ); + cancelScheduledRecovery(); } else { - // expected is set but not satisfied so wait until it is satisfied or times out - enforceRecoverAfterTime = true; - reason = "expecting [" + expectedDataNodes + "] data nodes, but only have [" + nodes.getDataNodes().size() + "]"; + maybePerformOrScheduleRecovery(currentDataNodeSize); } - performStateRecovery(enforceRecoverAfterTime, reason); } - } - private void performStateRecovery(final boolean enforceRecoverAfterTime, final String reason) { - if (enforceRecoverAfterTime && recoverAfterTime != null) { - if (scheduledRecovery.compareAndSet(false, true)) { - logger.info("delaying initial state recovery for [{}]. {}", recoverAfterTime, reason); - threadPool.schedule(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.warn("delayed state recovery failed", e); - resetRecoveredFlags(); - } - - @Override - protected void doRun() { - if (recoveryInProgress.compareAndSet(false, true)) { - logger.info("recover_after_time [{}] elapsed. performing state recovery...", recoverAfterTime); - runRecovery(); + void maybePerformOrScheduleRecovery(int currentDataNodeSize) { + if (expectedDataNodes != -1 && expectedDataNodes <= currentDataNodeSize) { + logger.debug( + "performing state recovery of term [{}], expected data nodes [{}] is reached", + expectedTerm, + expectedDataNodes + ); + cancelScheduledRecovery(); + runRecoveryImmediately(); + } else if (recoverAfterTime == null) { + logger.debug("performing state recovery of term [{}], no delay time is configured", expectedTerm); + cancelScheduledRecovery(); + runRecoveryImmediately(); + } else { + if (scheduledRecovery == null) { + logger.info( + "delaying initial state recovery for [{}] of term [{}]. 
expecting [{}] data nodes, but only have [{}]", + recoverAfterTime, + expectedTerm, + expectedDataNodes, + currentDataNodeSize + ); + scheduledRecovery = threadPool.schedule(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn("delayed state recovery of term [" + expectedTerm + "] failed", e); } - } - }, recoverAfterTime, threadPool.generic()); - } - } else { - if (recoveryInProgress.compareAndSet(false, true)) { - try { - logger.debug("performing state recovery..."); - runRecovery(); - } catch (Exception e) { - logger.warn("state recovery failed", e); - resetRecoveredFlags(); + + @Override + protected void doRun() { + final PendingStateRecovery existingPendingStateRecovery = currentPendingStateRecovery; + if (PendingStateRecovery.this == existingPendingStateRecovery) { + runRecoveryImmediately(); + } else { + logger.debug( + "skip scheduled state recovery since a new one of term [{}] has started", + existingPendingStateRecovery.expectedTerm + ); + } + } + }, recoverAfterTime, threadPool.generic()); + } else { + logger.debug("state recovery is in already scheduled for term [{}]", expectedTerm); } } } - } - private void resetRecoveredFlags() { - recoveryInProgress.set(false); - scheduledRecovery.set(false); + void runRecoveryImmediately() { + if (taskSubmitted.compareAndSet(false, true)) { + submitUnbatchedTask(TASK_SOURCE, new RecoverStateUpdateTask(expectedTerm)); + } else { + logger.debug("state recovery task is already submitted"); + } + } + + void cancelScheduledRecovery() { + if (scheduledRecovery != null) { + scheduledRecovery.cancel(); + scheduledRecovery = null; + } + } } private static final String TASK_SOURCE = "local-gateway-elected-state"; class RecoverStateUpdateTask extends ClusterStateUpdateTask { + private final long expectedTerm; + + RecoverStateUpdateTask(long expectedTerm) { + this.expectedTerm = expectedTerm; + } + @Override public ClusterState execute(final ClusterState currentState) { if (currentState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { logger.debug("cluster is already recovered"); return currentState; } + if (expectedTerm != currentState.term()) { + logger.debug("skip state recovery since current term [{}] != expected term [{}]", currentState.term(), expectedTerm); + return currentState; + } return ClusterStateUpdaters.removeStateNotRecoveredBlock( ClusterStateUpdaters.updateRoutingTable(currentState, shardRoutingRoleStrategy) ); @@ -228,7 +299,6 @@ public void clusterStateProcessed(final ClusterState oldState, final ClusterStat logger.info("recovered [{}] indices into cluster_state", newState.metadata().indices().size()); // reset flag even though state recovery completed, to ensure that if we subsequently become leader again based on a // not-recovered state, that we again do another state recovery. 
- resetRecoveredFlags(); rerouteService.reroute("state recovered", Priority.NORMAL, ActionListener.noop()); } @@ -239,7 +309,6 @@ public void onFailure(final Exception e) { () -> "unexpected failure during [" + TASK_SOURCE + "]", e ); - resetRecoveredFlags(); } } @@ -248,10 +317,6 @@ TimeValue recoverAfterTime() { return recoverAfterTime; } - private void runRecovery() { - submitUnbatchedTask(TASK_SOURCE, new RecoverStateUpdateTask()); - } - @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task) { clusterService.submitUnbatchedStateUpdateTask(source, task); diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java index e17522cd1efef..b4296ae684840 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java @@ -8,77 +8,138 @@ package org.elasticsearch.gateway; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.coordination.CoordinationMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RerouteService; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; +import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.cluster.service.FakeThreadPoolMasterService; +import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; +import org.junit.Before; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; +import static org.elasticsearch.gateway.GatewayService.EXPECTED_DATA_NODES_SETTING; +import static org.elasticsearch.gateway.GatewayService.RECOVER_AFTER_DATA_NODES_SETTING; +import static org.elasticsearch.gateway.GatewayService.RECOVER_AFTER_TIME_SETTING; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; -import static org.elasticsearch.test.NodeRoles.masterNode; +import static org.hamcrest.CoreMatchers.equalTo; +import 
static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.CoreMatchers.sameInstance; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class GatewayServiceTests extends ESTestCase { - private GatewayService createService(final Settings.Builder settings) { - final ClusterService clusterService = new ClusterService( - Settings.builder().put("cluster.name", "GatewayServiceTests").build(), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - null, - (TaskManager) null - ); - return new GatewayService( - settings.build(), - (reason, priority, listener) -> fail("should not reroute"), + private DeterministicTaskQueue deterministicTaskQueue; + private AtomicInteger rerouteCount; + private String dataNodeIdPrefix; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + deterministicTaskQueue = new DeterministicTaskQueue(); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), equalTo(0L)); + rerouteCount = new AtomicInteger(); + dataNodeIdPrefix = randomAlphaOfLength(10) + "-"; + } + + private GatewayService createGatewayService(final Settings.Builder settingsBuilder, final ClusterState initialState) { + return createGatewayService(createClusterService(settingsBuilder, initialState)); + } + + private GatewayService createGatewayService(final ClusterService clusterService) { + final RerouteService rerouteService = (reason, priority, listener) -> { + rerouteCount.incrementAndGet(); + listener.onResponse(null); + }; + + final var gatewayService = new GatewayService( + clusterService.getSettings(), + rerouteService, clusterService, - TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, - null + ShardRoutingRoleStrategy.NO_SHARD_CREATION, + clusterService.threadPool() + ); + + gatewayService.start(); + return gatewayService; + } + + private ClusterService createClusterService(final Settings.Builder settingsBuilder, final ClusterState initialState) { + final var threadPool = deterministicTaskQueue.getThreadPool(); + final var settings = settingsBuilder.build(); + final var clusterSettings = createBuiltInClusterSettings(settings); + + final var clusterService = new ClusterService( + settings, + clusterSettings, + new FakeThreadPoolMasterService(initialState.nodes().getLocalNodeId(), threadPool, deterministicTaskQueue::scheduleNow), + new ClusterApplierService(initialState.nodes().getLocalNodeId(), settings, clusterSettings, threadPool) { + @Override + protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { + return deterministicTaskQueue.getPrioritizedEsThreadPoolExecutor(); + } + } ); + + clusterService.getClusterApplierService().setInitialState(initialState); + clusterService.setNodeConnectionsService(ClusterServiceUtils.createNoOpNodeConnectionsService()); + clusterService.getMasterService() + .setClusterStatePublisher(ClusterServiceUtils.createClusterStatePublisher(clusterService.getClusterApplierService())); + clusterService.getMasterService().setClusterStateSupplier(clusterService.getClusterApplierService()::state); + clusterService.start(); + return clusterService; } public void testDefaultRecoverAfterTime() { // check that the default is not set - GatewayService service = createService(Settings.builder()); + final 
ClusterState initialState = buildClusterState(1, 1); + GatewayService service = createGatewayService(Settings.builder(), initialState); assertNull(service.recoverAfterTime()); // ensure default is set when setting expected_data_nodes - service = createService(Settings.builder().put("gateway.expected_data_nodes", 1)); - assertThat(service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET)); + service = createGatewayService(Settings.builder().put("gateway.expected_data_nodes", 1), initialState); + assertThat(service.recoverAfterTime(), equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET)); // ensure settings override default final TimeValue timeValue = TimeValue.timeValueHours(3); // ensure default is set when setting expected_nodes - service = createService(Settings.builder().put("gateway.recover_after_time", timeValue.toString())); - assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis())); + service = createGatewayService(Settings.builder().put("gateway.recover_after_time", timeValue.toString()), initialState); + assertThat(service.recoverAfterTime().millis(), equalTo(timeValue.millis())); } public void testRecoverStateUpdateTask() throws Exception { - GatewayService service = createService(Settings.builder()); - ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask(); - String nodeId = randomAlphaOfLength(10); - DiscoveryNode masterNode = DiscoveryNodeUtils.builder(nodeId) - .applySettings(settings(IndexVersion.current()).put(masterNode()).build()) - .address(new TransportAddress(TransportAddress.META_ADDRESS, 9300)) - .build(); - ClusterState stateWithBlock = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) - .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build()) - .build(); + final long expectedTerm = randomLongBetween(1, 42); + ClusterState stateWithBlock = buildClusterState(1, expectedTerm); + GatewayService service = createGatewayService(Settings.builder(), stateWithBlock); + ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask(expectedTerm); ClusterState recoveredState = clusterStateUpdateTask.execute(stateWithBlock); assertNotEquals(recoveredState, stateWithBlock); @@ -88,4 +149,363 @@ public void testRecoverStateUpdateTask() throws Exception { assertSame(recoveredState, clusterState); } + public void testRecoveryWillAbortIfExpectedTermDoesNotMatch() throws Exception { + final long expectedTerm = randomLongBetween(1, 42); + final ClusterState stateWithBlock = buildClusterState(1, randomLongBetween(43, 99)); + final GatewayService service = createGatewayService(Settings.builder(), stateWithBlock); + final ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask(expectedTerm); + + final ClusterState recoveredState = clusterStateUpdateTask.execute(stateWithBlock); + assertSame(recoveredState, stateWithBlock); + } + + public void testNoActionWhenNodeIsNotMaster() { + final String localNodeId = dataNodeIdPrefix + "0"; + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder() + .localNodeId(localNodeId) + .add(DiscoveryNodeUtils.create(localNodeId)); + if (randomBoolean()) { + final String masterNodeId = dataNodeIdPrefix + "1"; + nodesBuilder.masterNodeId(masterNodeId).add(DiscoveryNodeUtils.create(masterNodeId)); + } + + final ClusterState initialState = 
ClusterState.builder(ClusterName.DEFAULT) + .nodes(nodesBuilder.build()) + .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build()) + .build(); + + final ClusterChangedEvent clusterChangedEvent = mock(ClusterChangedEvent.class); + when(clusterChangedEvent.state()).thenReturn(initialState); + + final GatewayService gatewayService = createGatewayService(Settings.builder(), initialState); + gatewayService.clusterChanged(clusterChangedEvent); + assertThat(deterministicTaskQueue.hasAnyTasks(), is(false)); + assertThat(gatewayService.currentPendingStateRecovery, nullValue()); + } + + public void testNoActionWhenStateIsAlreadyRecovered() { + final ClusterService clusterService = createClusterService( + Settings.builder() + .put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2) + .put(GatewayService.EXPECTED_DATA_NODES_SETTING.getKey(), 4) + .put(GatewayService.RECOVER_AFTER_TIME_SETTING.getKey(), TimeValue.timeValueMinutes(10)), + ClusterState.builder(buildClusterState(2, randomIntBetween(1, 42))).blocks(ClusterBlocks.builder()).build() + ); + final GatewayService gatewayService = createGatewayService(clusterService); + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(0)); + + final var taskQueue = createSetDataNodeCountTaskQueue(clusterService); + final int newDataNodeCount = randomIntBetween(1, 5); + taskQueue.submitTask(randomAlphaOfLength(5), new SetDataNodeCountTask(newDataNodeCount), null); + deterministicTaskQueue.runAllTasksInTimeOrder(); + + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(0)); + assertThat(gatewayService.currentPendingStateRecovery, nullValue()); + assertThat(clusterService.state().nodes().getDataNodes().size(), equalTo(newDataNodeCount)); + } + + public void testImmediateRecovery() { + final Settings.Builder settingsBuilder = Settings.builder(); + final int expectedNumberOfDataNodes = randomIntBetween(1, 3); + // The cluster recover immediately because it either has the required expectedDataNodes + // or both expectedDataNodes and recoverAfterTime are not configured + if (randomBoolean()) { + settingsBuilder.put(EXPECTED_DATA_NODES_SETTING.getKey(), expectedNumberOfDataNodes); + } + + final ClusterState initialState = buildClusterState(expectedNumberOfDataNodes, 0); + final ClusterService clusterService = createClusterService(settingsBuilder, initialState); + final GatewayService gatewayService = createGatewayService(clusterService); + assertClusterStateBlocks(clusterService, true); + assertThat(rerouteCount.get(), equalTo(0)); + + // Recover immediately + final var setClusterStateTaskQueue = createSetClusterStateTaskQueue(clusterService); + final ClusterState clusterStateOfTerm1 = incrementTerm(initialState); + setClusterStateTaskQueue.submitTask(randomAlphaOfLength(5), new SetClusterStateTask(clusterStateOfTerm1), null); + assertThat(deterministicTaskQueue.hasRunnableTasks(), is(true)); + deterministicTaskQueue.runAllRunnableTasks(); + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(1)); + final var pendingStateRecoveryOfTerm1 = gatewayService.currentPendingStateRecovery; + assertThat(pendingStateRecoveryOfTerm1, notNullValue()); + + // Will *not* run recover again for the same term + setClusterStateTaskQueue.submitTask(randomAlphaOfLength(5), new SetClusterStateTask(clusterStateOfTerm1), null); + deterministicTaskQueue.runAllRunnableTasks(); + assertThat(deterministicTaskQueue.hasAnyTasks(), is(false)); + 
assertThat(rerouteCount.get(), equalTo(1)); + assertClusterStateBlocks(clusterService, true); + assertThat(gatewayService.currentPendingStateRecovery, sameInstance(pendingStateRecoveryOfTerm1)); + + // Will run recover again for a newer term + final ClusterState clusterStateOfTerm2 = ClusterState.builder(initialState) + .metadata(Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(2).build()).build()) + .build(); + setClusterStateTaskQueue.submitTask(randomAlphaOfLength(5), new SetClusterStateTask(clusterStateOfTerm2), null); + assertThat(deterministicTaskQueue.hasRunnableTasks(), is(true)); + deterministicTaskQueue.runAllRunnableTasks(); + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(2)); + assertThat(gatewayService.currentPendingStateRecovery, not(sameInstance(pendingStateRecoveryOfTerm1))); + + // Never ran any scheduled task since recovery is immediate + assertThat(deterministicTaskQueue.hasDeferredTasks(), is(false)); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), equalTo(0L)); + } + + public void testScheduledRecovery() { + final var hasRecoverAfterTime = randomBoolean(); + final ClusterService clusterService = createServicesTupleForScheduledRecovery(randomIntBetween(2, 5), hasRecoverAfterTime).v1(); + + // Recover when the scheduled recovery is ready to run + deterministicTaskQueue.runAllTasksInTimeOrder(); + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(1)); + assertTimeElapsed(TimeValue.timeValueMinutes(hasRecoverAfterTime ? 10 : 5).millis()); + } + + public void testScheduledRecoveryCancelledWhenClusterCanRecoverImmediately() { + final var expectedNumberOfDataNodes = randomIntBetween(2, 5); + final boolean hasRecoverAfterTime = randomBoolean(); + final var servicesTuple = createServicesTupleForScheduledRecovery(expectedNumberOfDataNodes, hasRecoverAfterTime); + final ClusterService clusterService = servicesTuple.v1(); + final GatewayService gatewayService = servicesTuple.v2(); + final var pendingStateRecoveryOfTerm1 = gatewayService.currentPendingStateRecovery; + + // The 1st schedule is cancelled when the cluster has enough nodes + final var setDataNodeCountTaskQueue = createSetDataNodeCountTaskQueue(clusterService); + setDataNodeCountTaskQueue.submitTask(randomAlphaOfLength(5), new SetDataNodeCountTask(expectedNumberOfDataNodes), null); + deterministicTaskQueue.runAllRunnableTasks(); + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(1)); + assertThat(gatewayService.currentPendingStateRecovery, sameInstance(pendingStateRecoveryOfTerm1)); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), equalTo(0L)); + // Cancelled scheduled recovery is a no-op + deterministicTaskQueue.runAllTasksInTimeOrder(); + assertThat(rerouteCount.get(), equalTo(1)); + assertTimeElapsed(TimeValue.timeValueMinutes(hasRecoverAfterTime ? 
10 : 5).millis()); + } + + public void testScheduledRecoveryNoOpWhenNewTermBegins() { + final var hasRecoverAfterTime = randomBoolean(); + final var servicesTuple = createServicesTupleForScheduledRecovery(randomIntBetween(2, 5), hasRecoverAfterTime); + final ClusterService clusterService = servicesTuple.v1(); + final GatewayService gatewayService = servicesTuple.v2(); + final var setClusterStateTaskQueue = createSetClusterStateTaskQueue(clusterService); + final var pendingStateRecoveryOfTerm1 = gatewayService.currentPendingStateRecovery; + + // The 1st schedule is effectively cancelled if a new term begins + final TimeValue elapsed = TimeValue.timeValueMinutes(1); + final ClusterState clusterStateOfTerm2 = incrementTerm(clusterService.state()); + deterministicTaskQueue.scheduleAt( + elapsed.millis(), + () -> setClusterStateTaskQueue.submitTask(randomAlphaOfLength(5), new SetClusterStateTask(clusterStateOfTerm2), null) + ); + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + assertThat(gatewayService.currentPendingStateRecovery, not(sameInstance(pendingStateRecoveryOfTerm1))); + // The 1st scheduled recovery is now a no-op + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + assertThat( + deterministicTaskQueue.getCurrentTimeMillis(), + equalTo(TimeValue.timeValueMinutes(hasRecoverAfterTime ? 10 : 5).millis()) + ); + assertClusterStateBlocks(clusterService, true); + assertThat(rerouteCount.get(), equalTo(0)); + // The 2nd schedule will perform the recovery + deterministicTaskQueue.runAllTasksInTimeOrder(); + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(1)); + assertTimeElapsed(elapsed.millis() + TimeValue.timeValueMinutes(hasRecoverAfterTime ? 
10 : 5).millis()); + } + + private Tuple createServicesTupleForScheduledRecovery( + int expectedNumberOfDataNodes, + boolean hasRecoverAfterTime + ) { + final Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(EXPECTED_DATA_NODES_SETTING.getKey(), expectedNumberOfDataNodes); + if (hasRecoverAfterTime) { + settingsBuilder.put(RECOVER_AFTER_TIME_SETTING.getKey(), TimeValue.timeValueMinutes(10)); + } + final ClusterState initialState = buildClusterState(1, 0); + final ClusterService clusterService = createClusterService(settingsBuilder, initialState); + final GatewayService gatewayService = createGatewayService(clusterService); + assertClusterStateBlocks(clusterService, true); + + final ClusterState clusterStateOfTerm1 = incrementTerm(initialState); + final var setClusterStateTaskQueue = createSetClusterStateTaskQueue(clusterService); + setClusterStateTaskQueue.submitTask(randomAlphaOfLength(5), new SetClusterStateTask(clusterStateOfTerm1), null); + deterministicTaskQueue.runAllRunnableTasks(); // publish cluster state term change + // recovery is scheduled but has not run yet + assertThat(deterministicTaskQueue.hasDeferredTasks(), is(true)); + assertClusterStateBlocks(clusterService, true); + assertThat(rerouteCount.get(), equalTo(0)); + final GatewayService.PendingStateRecovery pendingStateRecoveryOfInitialTerm = gatewayService.currentPendingStateRecovery; + assertThat(pendingStateRecoveryOfInitialTerm, notNullValue()); + return new Tuple<>(clusterService, gatewayService); + } + + public void testScheduledRecoveryWithRecoverAfterNodes() { + final Settings.Builder settingsBuilder = Settings.builder(); + final int expectedNumberOfDataNodes = randomIntBetween(4, 6); + final boolean hasRecoverAfterTime = randomBoolean(); + if (hasRecoverAfterTime) { + settingsBuilder.put(RECOVER_AFTER_TIME_SETTING.getKey(), TimeValue.timeValueMinutes(10)); + } else { + settingsBuilder.put(EXPECTED_DATA_NODES_SETTING.getKey(), expectedNumberOfDataNodes); + } + final int recoverAfterNodes = expectedNumberOfDataNodes - 1; + settingsBuilder.put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), recoverAfterNodes); + + final ClusterState initialState = buildClusterState(1, 1); + final ClusterService clusterService = createClusterService(settingsBuilder, initialState); + final GatewayService gatewayService = createGatewayService(clusterService); + assertClusterStateBlocks(clusterService, true); + + // Not recover because recoverAfterDataNodes not met + final var setDataNodeCountTaskQueue = createSetDataNodeCountTaskQueue(clusterService); + setDataNodeCountTaskQueue.submitTask(randomAlphaOfLength(5), new SetDataNodeCountTask(recoverAfterNodes - 1), null); + deterministicTaskQueue.runAllTasksInTimeOrder(); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), equalTo(0L)); + assertClusterStateBlocks(clusterService, true); + final var pendingStateRecoveryOfInitialTerm = gatewayService.currentPendingStateRecovery; + assertThat(pendingStateRecoveryOfInitialTerm, notNullValue()); + + // The 1st scheduled recovery when recoverAfterDataNodes is met + setDataNodeCountTaskQueue.submitTask(randomAlphaOfLength(5), new SetDataNodeCountTask(recoverAfterNodes), null); + deterministicTaskQueue.runAllRunnableTasks(); + assertThat(deterministicTaskQueue.hasDeferredTasks(), is(true)); + assertThat(gatewayService.currentPendingStateRecovery, sameInstance(pendingStateRecoveryOfInitialTerm)); + + // The 1st schedule is cancelled when data nodes drop below recoverAfterDataNodes + final TimeValue elapsed = 
TimeValue.timeValueMinutes(1); + deterministicTaskQueue.scheduleAt( + elapsed.millis(), + () -> setDataNodeCountTaskQueue.submitTask(randomAlphaOfLength(5), new SetDataNodeCountTask(recoverAfterNodes - 1), null) + ); + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + + // The 2nd scheduled recovery when data nodes are above recoverAfterDataNodes again + deterministicTaskQueue.scheduleAt( + elapsed.millis() * 2, + () -> setDataNodeCountTaskQueue.submitTask(randomAlphaOfLength(5), new SetDataNodeCountTask(recoverAfterNodes), null) + ); + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + + assertThat(gatewayService.currentPendingStateRecovery, sameInstance(pendingStateRecoveryOfInitialTerm)); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), equalTo(elapsed.millis() * 2)); + + // The 1st scheduled recovery is now a no-op since it is cancelled + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + assertThat( + deterministicTaskQueue.getCurrentTimeMillis(), + equalTo(TimeValue.timeValueMinutes(hasRecoverAfterTime ? 10 : 5).millis()) + ); + assertClusterStateBlocks(clusterService, true); + assertThat(rerouteCount.get(), equalTo(0)); + + // The 2nd scheduled recovery will recover the state + deterministicTaskQueue.runAllTasksInTimeOrder(); + assertClusterStateBlocks(clusterService, false); + assertThat(rerouteCount.get(), equalTo(1)); + assertTimeElapsed(elapsed.millis() * 2 + TimeValue.timeValueMinutes(hasRecoverAfterTime ? 10 : 5).millis()); + } + + private void assertClusterStateBlocks(ClusterService clusterService, boolean isBlocked) { + assertThat(clusterService.state().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), is(isBlocked)); + } + + private void assertTimeElapsed(long elapsedInMillis) { + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), equalTo(elapsedInMillis)); + } + + private ClusterState buildClusterState(int numberOfNodes, long term) { + assert numberOfNodes >= 1; + final String localNodeId = dataNodeIdPrefix + "0"; + final DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder() + .localNodeId(localNodeId) + .masterNodeId(localNodeId) + .add(DiscoveryNodeUtils.create(localNodeId)); + for (int i = 1; i < numberOfNodes; i++) { + discoveryNodesBuilder.add(DiscoveryNodeUtils.create(dataNodeIdPrefix + i)); + } + + final ClusterState stateWithBlock = ClusterState.builder(ClusterName.DEFAULT) + .nodes(discoveryNodesBuilder.build()) + .metadata(Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(term).build()).build()) + .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build()) + .build(); + return stateWithBlock; + } + + private static ClusterState incrementTerm(ClusterState initialState) { + return ClusterState.builder(initialState) + .metadata(Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(initialState.term() + 1).build()).build()) + .build(); + } + + record SetDataNodeCountTask(int dataNodeCount) implements ClusterStateTaskListener { + SetDataNodeCountTask(int dataNodeCount) { + assertThat(dataNodeCount, greaterThanOrEqualTo(1)); + this.dataNodeCount = dataNodeCount; + } + + @Override + public void onFailure(Exception e) { + fail(e, "unexpected failure"); + } + } + + private MasterServiceTaskQueue createSetDataNodeCountTaskQueue(ClusterService clusterService) { + return clusterService.createTaskQueue("set-data-node-count", Priority.NORMAL, 
batchExecutionContext -> { + final ClusterState initialState = batchExecutionContext.initialState(); + final DiscoveryNodes initialNodes = initialState.nodes(); + final int initialDataNodeCount = initialNodes.getDataNodes().size(); + int targetDataNodeCount = initialDataNodeCount; + for (var taskContext : batchExecutionContext.taskContexts()) { + targetDataNodeCount = taskContext.getTask().dataNodeCount(); + taskContext.success(() -> {}); + } + if (targetDataNodeCount == initialDataNodeCount) { + return initialState; + } + + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(initialNodes); + for (int i = initialDataNodeCount; i < targetDataNodeCount; i++) { + nodesBuilder.add(DiscoveryNodeUtils.create(dataNodeIdPrefix + i)); + } + for (int i = targetDataNodeCount; i < initialDataNodeCount; i++) { + nodesBuilder.remove(dataNodeIdPrefix + i); + } + final DiscoveryNodes targetNodes = nodesBuilder.build(); + assertThat(targetNodes.getDataNodes().size(), equalTo(targetDataNodeCount)); + return ClusterState.builder(initialState).nodes(targetNodes).build(); + }); + } + + record SetClusterStateTask(ClusterState clusterState) implements ClusterStateTaskListener { + @Override + public void onFailure(Exception e) { + fail(e, "unexpected failure"); + } + } + + private MasterServiceTaskQueue createSetClusterStateTaskQueue(ClusterService clusterService) { + return clusterService.createTaskQueue("set-cluster-state", Priority.NORMAL, batchExecutionContext -> { + ClusterState targetState = batchExecutionContext.initialState(); + for (var taskContext : batchExecutionContext.taskContexts()) { + targetState = taskContext.getTask().clusterState(); + taskContext.success(() -> {}); + } + return targetState; + }); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java b/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java index 96b3510ed6afe..81a419508dbee 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java @@ -82,7 +82,7 @@ public void runAllRunnableTasks() { } public void runAllTasks() { - while (hasDeferredTasks() || hasRunnableTasks()) { + while (hasAnyTasks()) { if (hasDeferredTasks() && random.nextBoolean()) { advanceTime(); } else if (hasRunnableTasks()) { @@ -92,7 +92,7 @@ public void runAllTasks() { } public void runAllTasksInTimeOrder() { - while (hasDeferredTasks() || hasRunnableTasks()) { + while (hasAnyTasks()) { if (hasRunnableTasks()) { runRandomTask(); } else { @@ -115,6 +115,13 @@ public boolean hasDeferredTasks() { return deferredTasks.isEmpty() == false; } + /** + * @return whether there are any runnable or deferred tasks + */ + public boolean hasAnyTasks() { + return hasDeferredTasks() || hasRunnableTasks(); + } + /** * @return the current (simulated) time, in milliseconds. */ From 9322ab9b9163f70c9bf832f1b0a1985121393cfe Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:37:07 +0300 Subject: [PATCH 111/176] Don't print synthetic source in mapping for bwc tests (#100572) * Don't print synthetic source in mapping for bwc tests * Move comment. 
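To make the version gate concrete, here is a rough, self-contained sketch with hypothetical stand-in types (not the actual Parameter/IndexVersion machinery touched in the diff below): the implicit synthetic-source default applies only to time-series indices created in the affected version range, and an absent (null) default means nothing is serialized into the mapping.

import java.util.function.Supplier;

// Toy stand-ins for the real types; an illustrative sketch only, not the
// Elasticsearch classes changed below.
class VersionGatedDefaultSketch {
    enum Mode { STORED, SYNTHETIC }
    enum IndexMode { STANDARD, TIME_SERIES }

    // Supplies the default _source mode: SYNTHETIC only for time-series indices
    // whose creation version falls in the assumed bwc range [lowerId, upperId),
    // otherwise null so that no default is printed in the mapping.
    static Supplier<Mode> defaultMode(IndexMode indexMode, int createdVersionId, int lowerId, int upperId) {
        boolean inBwcRange = createdVersionId >= lowerId && createdVersionId < upperId;
        return () -> (indexMode == IndexMode.TIME_SERIES && inBwcRange) ? Mode.SYNTHETIC : null;
    }
}

This only shows the shape of the gate; in the real change the builder wires an equivalent supplier into its parameter defaults.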
--- .../index/mapper/SourceFieldMapper.java | 34 +++++++++---------- .../index/mapper/SourceFieldMapperTests.java | 8 +++++ .../query/SearchExecutionContextTests.java | 2 +- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index c5d5dbec1ef15..aeab22a6f5f35 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -101,20 +101,7 @@ public static class Builder extends MetadataFieldMapper.Builder { (previous, current, conflicts) -> (previous.value() == current.value()) || (previous.value() && current.value() == false) ); - /* - * The default mode for TimeSeries is left empty on purpose, so that mapping printings include the synthetic - * source mode. - */ - private final Parameter mode = new Parameter<>( - "mode", - true, - () -> null, - (n, c, o) -> Mode.valueOf(o.toString().toUpperCase(Locale.ROOT)), - m -> toType(m).enabled.explicit() ? null : toType(m).mode, - (b, n, v) -> b.field(n, v.toString().toLowerCase(Locale.ROOT)), - v -> v.toString().toLowerCase(Locale.ROOT) - ).setMergeValidator((previous, current, conflicts) -> (previous == current) || current != Mode.STORED) - .setSerializerCheck((includeDefaults, isConfigured, value) -> value != null); // don't emit if `enabled` is configured + private final Parameter mode; private final Parameter> includes = Parameter.stringArrayParam( "includes", false, @@ -128,9 +115,22 @@ public static class Builder extends MetadataFieldMapper.Builder { private final IndexMode indexMode; - public Builder(IndexMode indexMode) { + public Builder(IndexMode indexMode, IndexVersion indexVersion) { super(Defaults.NAME); this.indexMode = indexMode; + this.mode = new Parameter<>( + "mode", + true, + // The default mode for TimeSeries is left empty on purpose, so that mapping printings include the synthetic source mode. + () -> getIndexMode() == IndexMode.TIME_SERIES && indexVersion.between(IndexVersion.V_8_7_0, IndexVersion.V_8_10_0) + ? Mode.SYNTHETIC + : null, + (n, c, o) -> Mode.valueOf(o.toString().toUpperCase(Locale.ROOT)), + m -> toType(m).enabled.explicit() ? null : toType(m).mode, + (b, n, v) -> b.field(n, v.toString().toLowerCase(Locale.ROOT)), + v -> v.toString().toLowerCase(Locale.ROOT) + ).setMergeValidator((previous, current, conflicts) -> (previous == current) || current != Mode.STORED) + .setSerializerCheck((includeDefaults, isConfigured, value) -> value != null); // don't emit if `enabled` is configured } public Builder setSynthetic() { @@ -188,7 +188,7 @@ private IndexMode getIndexMode() { c -> c.getIndexSettings().getMode() == IndexMode.TIME_SERIES ? c.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersion.V_8_7_0) ? 
TSDB_DEFAULT : TSDB_LEGACY_DEFAULT : DEFAULT, - c -> new Builder(c.getIndexSettings().getMode()) + c -> new Builder(c.getIndexSettings().getMode(), c.getIndexSettings().getIndexVersionCreated()) ); static final class SourceFieldType extends MappedFieldType { @@ -313,7 +313,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(indexMode).init(this); + return new Builder(indexMode, IndexVersion.current()).init(this); } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index f683cb60c87c3..433ebc467483d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; @@ -238,4 +240,10 @@ public void testSyntheticSourceInTimeSeries() throws IOException { assertTrue(mapper.sourceMapper().isSynthetic()); assertEquals("{\"_source\":{\"mode\":\"synthetic\"}}", mapper.sourceMapper().toString()); } + + public void testSyntheticSourceInTimeSeriesBwc() throws IOException { + SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(IndexMode.TIME_SERIES, IndexVersion.V_8_8_0).build(); + assertTrue(sourceMapper.isSynthetic()); + assertEquals("{\"_source\":{\"mode\":\"synthetic\"}}", sourceMapper.toString()); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 6d671a258c26a..9df1dc24c2793 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -381,7 +381,7 @@ public void testSearchRequestRuntimeFieldsAndMultifieldDetection() { public void testSyntheticSourceSearchLookup() throws IOException { // Build a mapping using synthetic source - SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null).setSynthetic().build(); + SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, IndexVersion.current()).setSynthetic().build(); RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( new KeywordFieldMapper.Builder("cat", IndexVersion.current()).ignoreAbove(100) ).build(MapperBuilderContext.root(true, false)); From 969fd2acbffd98d6bfb8b1e0476d5e3fb2c2e435 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 13:39:58 +0100 Subject: [PATCH 112/176] Snapshot deletion process cleanups (#100568) Reorders the methods involved in snapshot deletion to be closer together and better match the flow of execution, and harmonises the names of many parameters and local variables to make it easier to follow them through the process. 
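Since the relocated unusedBlobs() helper is the heart of the per-shard cleanup in the diff below, here is a self-contained sketch of its filter for reference. Assumptions: the blob-name prefixes are "index-", "snap-" and "__" (mirroring BlobStoreRepository's SNAPSHOT_INDEX_PREFIX, SNAPSHOT_PREFIX and UPLOADED_DATA_BLOB_PREFIX constants), the referencedData set stands in for updatedSnapshots.findNameFile(canonicalName(blob)), and the FsBlobContainer.isTempBlobName clause is omitted for brevity:

// Runnable illustration of which shard-directory blobs become deletable after a
// snapshot is removed; not part of the patch.
import java.util.List;
import java.util.Set;

class UnusedShardBlobsSketch {
    public static void main(String[] args) {
        Set<String> survivingSnapshotUUIDs = Set.of("uuidKeep");
        Set<String> referencedData = Set.of("__data-0"); // data blobs still named by the new index- blob
        List<String> shardBlobs = List.of("index-3", "snap-uuidKeep.dat", "snap-uuidGone.dat", "__data-0", "__data-1");
        List<String> unused = shardBlobs.stream()
            .filter(blob -> blob.startsWith("index-")                        // all superseded shard index blobs
                || (blob.startsWith("snap-") && blob.endsWith(".dat")
                    && survivingSnapshotUUIDs.contains(
                        blob.substring("snap-".length(), blob.length() - ".dat".length())) == false)
                || (blob.startsWith("__") && referencedData.contains(blob) == false)) // unreferenced data blobs
            .toList();
        System.out.println(unused); // [index-3, snap-uuidGone.dat, __data-1]
    }
}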
--- .../repositories/s3/S3Repository.java | 8 +- .../repositories/FilterRepository.java | 6 +- .../repositories/InvalidRepository.java | 4 +- .../repositories/Repository.java | 12 +- .../repositories/UnknownTypeRepository.java | 4 +- .../blobstore/BlobStoreRepository.java | 495 ++++++++++-------- .../snapshots/SnapshotDeleteListener.java | 6 +- .../RepositoriesServiceTests.java | 4 +- .../index/shard/RestoreOnlyRepository.java | 4 +- .../xpack/ccr/repository/CcrRepository.java | 4 +- 10 files changed, 293 insertions(+), 254 deletions(-) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 5a4b5b3a313d8..0df9410f0c32e 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -317,12 +317,12 @@ public void onFailure(Exception e) { @Override public void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { final SnapshotDeleteListener wrappedListener; - if (SnapshotsService.useShardGenerations(repositoryMetaVersion)) { + if (SnapshotsService.useShardGenerations(repositoryFormatIndexVersion)) { wrappedListener = listener; } else { wrappedListener = new SnapshotDeleteListener() { @@ -354,7 +354,7 @@ public void onFailure(Exception e) { } }; } - super.deleteSnapshots(snapshotIds, repositoryStateId, repositoryMetaVersion, wrappedListener); + super.deleteSnapshots(snapshotIds, repositoryDataGeneration, repositoryFormatIndexVersion, wrappedListener); } /** diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index 439da9dba45c9..d747fa6780a73 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -73,11 +73,11 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte @Override public void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { - in.deleteSnapshots(snapshotIds, repositoryStateId, repositoryMetaVersion, listener); + in.deleteSnapshots(snapshotIds, repositoryDataGeneration, repositoryFormatIndexVersion, listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java index e95900d405698..4b5e22607758f 100644 --- a/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/InvalidRepository.java @@ -80,8 +80,8 @@ public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { @Override public void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { listener.onFailure(createCreationException()); diff --git 
a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index cb6c845bb037f..ee98cc212da14 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -137,15 +137,15 @@ public void onFailure(Exception e) { /** * Deletes snapshots * - * @param snapshotIds snapshot ids - * @param repositoryStateId the unique id identifying the state of the repository when the snapshot deletion began - * @param repositoryMetaVersion version of the updated repository metadata to write - * @param listener completion listener + * @param snapshotIds snapshot ids to delete + * @param repositoryDataGeneration the generation of the {@link RepositoryData} in the repository at the start of the deletion + * @param repositoryFormatIndexVersion the version of repository format to use, indicating the layout of blobs (for bwc) + * @param listener completion listener, see {@link SnapshotDeleteListener}. */ void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ); diff --git a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java index 087b974cbe2c6..db14413525641 100644 --- a/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/UnknownTypeRepository.java @@ -78,8 +78,8 @@ public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { @Override public void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { listener.onFailure(createUnknownTypeException()); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 82a38d74d25e1..9a2d53312d577 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -782,44 +782,6 @@ public RepositoryStats stats() { return new RepositoryStats(store.stats()); } - @Override - public void deleteSnapshots( - Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, - SnapshotDeleteListener listener - ) { - if (isReadOnly()) { - listener.onFailure(new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository")); - } else { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - final Map rootBlobs = blobContainer().listBlobs(OperationPurpose.SNAPSHOT); - final RepositoryData repositoryData = safeRepositoryData(repositoryStateId, rootBlobs); - // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never - // delete an index that was created by another master node after writing this index-N blob. 
- final Map foundIndices = blobStore().blobContainer(indicesPath()) - .children(OperationPurpose.SNAPSHOT); - doDeleteShardSnapshots( - snapshotIds, - repositoryStateId, - foundIndices, - rootBlobs, - repositoryData, - repositoryMetaVersion, - listener - ); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(new RepositoryException(metadata.name(), "failed to delete snapshots " + snapshotIds, e)); - } - }); - } - } - /** * Loads {@link RepositoryData} ensuring that it is consistent with the given {@code rootBlobs} as well of the assumed generation. * @@ -867,33 +829,91 @@ private RepositoryData safeRepositoryData(long repositoryStateId, Map snapshotIds, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, + SnapshotDeleteListener listener + ) { + if (isReadOnly()) { + listener.onFailure(new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository")); + } else { + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + final Map rootBlobs = blobContainer().listBlobs(OperationPurpose.SNAPSHOT); + final RepositoryData repositoryData = safeRepositoryData(repositoryDataGeneration, rootBlobs); + // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never + // delete an index that was created by another master node after writing this index-N blob. + final Map foundIndices = blobStore().blobContainer(indicesPath()) + .children(OperationPurpose.SNAPSHOT); + doDeleteShardSnapshots( + snapshotIds, + repositoryDataGeneration, + foundIndices, + rootBlobs, + repositoryData, + repositoryFormatIndexVersion, + listener + ); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(new RepositoryException(metadata.name(), "failed to delete snapshots " + snapshotIds, e)); + } + }); + } + } + + /** + * The result of removing a snapshot from a shard folder in the repository. + * + * @param indexId Index that the snapshot was removed from + * @param shardId Shard id that the snapshot was removed from + * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more + * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation + */ + private record ShardSnapshotMetaDeleteResult( + IndexId indexId, + int shardId, + ShardGeneration newGeneration, + Collection blobsToDelete + ) {} + + // --------------------------------------------------------------------------------------------------------------------------------- + // The overall flow of execution + /** * After updating the {@link RepositoryData} each of the shards directories is individually first moved to the next shard generation * and then has all now unreferenced blobs in it deleted. * * @param snapshotIds SnapshotIds to delete - * @param repositoryStateId Expected repository state id - * @param foundIndices All indices folders found in the repository before executing any writes to the repository during this - * delete operation - * @param rootBlobs All blobs found at the root of the repository before executing any writes to the repository during this - * delete operation - * @param repositoryData RepositoryData found the in the repository before executing this delete + * @param originalRepositoryDataGeneration {@link RepositoryData} generation at the start of the process. 
+ * @param originalIndexContainers All index containers at the start of the operation, obtained by listing the repository + * contents. + * @param originalRootBlobs All blobs found at the root of the repository at the start of the operation, obtained by + * listing the repository contents. + * @param originalRepositoryData {@link RepositoryData} at the start of the operation. + * @param repositoryFormatIndexVersion The minimum {@link IndexVersion} of the nodes in the cluster and the snapshots remaining in + * the repository. * @param listener Listener to invoke once finished */ private void doDeleteShardSnapshots( Collection snapshotIds, - long repositoryStateId, - Map foundIndices, - Map rootBlobs, - RepositoryData repositoryData, - IndexVersion repoMetaVersion, + long originalRepositoryDataGeneration, + Map originalIndexContainers, + Map originalRootBlobs, + RepositoryData originalRepositoryData, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { - if (SnapshotsService.useShardGenerations(repoMetaVersion)) { + if (SnapshotsService.useShardGenerations(repositoryFormatIndexVersion)) { // First write the new shard state metadata (with the removed snapshot) and compute deletion targets final ListenableFuture> writeShardMetaDataAndComputeDeletesStep = new ListenableFuture<>(); - writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, repositoryData, true, writeShardMetaDataAndComputeDeletesStep); + writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, originalRepositoryData, true, writeShardMetaDataAndComputeDeletesStep); // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows: // 1. Remove the snapshots from the list of existing snapshots // 2. Update the index shard generations of all updated shard folders @@ -902,28 +922,34 @@ private void doDeleteShardSnapshots( // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only // written if all shard paths have been successfully updated. 
final ListenableFuture writeUpdatedRepoDataStep = new ListenableFuture<>(); - writeShardMetaDataAndComputeDeletesStep.addListener(ActionListener.wrap(deleteResults -> { + writeShardMetaDataAndComputeDeletesStep.addListener(ActionListener.wrap(shardDeleteResults -> { final ShardGenerations.Builder builder = ShardGenerations.builder(); - for (ShardSnapshotMetaDeleteResult newGen : deleteResults) { + for (ShardSnapshotMetaDeleteResult newGen : shardDeleteResults) { builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); } - final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, builder.build()); + final RepositoryData newRepositoryData = originalRepositoryData.removeSnapshots(snapshotIds, builder.build()); writeIndexGen( - updatedRepoData, - repositoryStateId, - repoMetaVersion, + newRepositoryData, + originalRepositoryDataGeneration, + repositoryFormatIndexVersion, Function.identity(), ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) ); }, listener::onFailure)); // Once we have updated the repository, run the clean-ups - writeUpdatedRepoDataStep.addListener(ActionListener.wrap(updatedRepoData -> { - listener.onRepositoryDataWritten(updatedRepoData); + writeUpdatedRepoDataStep.addListener(ActionListener.wrap(newRepositoryData -> { + listener.onRepositoryDataWritten(newRepositoryData); // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion try (var refs = new RefCountingRunnable(listener::onDone)) { - cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, updatedRepoData, refs.acquireListener()); - asyncCleanupUnlinkedShardLevelBlobs( - repositoryData, + cleanupUnlinkedRootAndIndicesBlobs( + snapshotIds, + originalIndexContainers, + originalRootBlobs, + newRepositoryData, + refs.acquireListener() + ); + cleanupUnlinkedShardLevelBlobs( + originalRepositoryData, snapshotIds, writeShardMetaDataAndComputeDeletesStep.result(), refs.acquireListener() @@ -932,82 +958,64 @@ private void doDeleteShardSnapshots( }, listener::onFailure)); } else { // Write the new repository data first (with the removed snapshot), using no shard generations - final RepositoryData updatedRepoData = repositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY); - writeIndexGen(updatedRepoData, repositoryStateId, repoMetaVersion, Function.identity(), ActionListener.wrap(newRepoData -> { - try (var refs = new RefCountingRunnable(() -> { - listener.onRepositoryDataWritten(newRepoData); - listener.onDone(); - })) { - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - cleanupUnlinkedRootAndIndicesBlobs(snapshotIds, foundIndices, rootBlobs, newRepoData, refs.acquireListener()); - - // writeIndexGen finishes on master-service thread so must fork here. 
- threadPool.executor(ThreadPool.Names.SNAPSHOT) - .execute( - ActionRunnable.wrap( - refs.acquireListener(), - l0 -> writeUpdatedShardMetaDataAndComputeDeletes( - snapshotIds, - repositoryData, - false, - l0.delegateFailure( - (l, deleteResults) -> asyncCleanupUnlinkedShardLevelBlobs( - repositoryData, - snapshotIds, - deleteResults, - l + writeIndexGen( + originalRepositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY), + originalRepositoryDataGeneration, + repositoryFormatIndexVersion, + Function.identity(), + ActionListener.wrap(newRepositoryData -> { + try (var refs = new RefCountingRunnable(() -> { + listener.onRepositoryDataWritten(newRepositoryData); + listener.onDone(); + })) { + // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion + cleanupUnlinkedRootAndIndicesBlobs( + snapshotIds, + originalIndexContainers, + originalRootBlobs, + newRepositoryData, + refs.acquireListener() + ); + + // writeIndexGen finishes on master-service thread so must fork here. + threadPool.executor(ThreadPool.Names.SNAPSHOT) + .execute( + ActionRunnable.wrap( + refs.acquireListener(), + l0 -> writeUpdatedShardMetaDataAndComputeDeletes( + snapshotIds, + originalRepositoryData, + false, + l0.delegateFailure( + (l, deleteResults) -> cleanupUnlinkedShardLevelBlobs( + originalRepositoryData, + snapshotIds, + deleteResults, + l + ) ) ) ) - ) - ); - } - }, listener::onFailure)); + ); + } + }, listener::onFailure) + ); } } - private void cleanupUnlinkedRootAndIndicesBlobs( - Collection deletedSnapshots, - Map foundIndices, - Map rootBlobs, - RepositoryData updatedRepoData, - ActionListener listener - ) { - cleanupStaleBlobs(deletedSnapshots, foundIndices, rootBlobs, updatedRepoData, listener.map(ignored -> null)); - } - - private void asyncCleanupUnlinkedShardLevelBlobs( - RepositoryData oldRepositoryData, - Collection snapshotIds, - Collection deleteResults, - ActionListener listener - ) { - final Iterator filesToDelete = resolveFilesToDelete(oldRepositoryData, snapshotIds, deleteResults); - if (filesToDelete.hasNext() == false) { - listener.onResponse(null); - return; - } - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { - try { - deleteFromContainer(blobContainer(), filesToDelete); - l.onResponse(null); - } catch (Exception e) { - logger.warn(() -> format("%s Failed to delete some blobs during snapshot delete", snapshotIds), e); - throw e; - } - })); - } + // --------------------------------------------------------------------------------------------------------------------------------- + // Updating the shard-level metadata and accumulating results // updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up. 
private void writeUpdatedShardMetaDataAndComputeDeletes( Collection snapshotIds, - RepositoryData oldRepositoryData, - boolean useUUIDs, + RepositoryData originalRepositoryData, + boolean useShardGenerations, ActionListener> onAllShardsCompleted ) { final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - final List indices = oldRepositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotIds); + final List indices = originalRepositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotIds); if (indices.isEmpty()) { onAllShardsCompleted.onResponse(Collections.emptyList()); @@ -1021,14 +1029,14 @@ private void writeUpdatedShardMetaDataAndComputeDeletes( ); for (IndexId indexId : indices) { - final Set snapshotsWithIndex = Set.copyOf(oldRepositoryData.getSnapshots(indexId)); + final Set snapshotsWithIndex = Set.copyOf(originalRepositoryData.getSnapshots(indexId)); final Set survivingSnapshots = snapshotsWithIndex.stream() .filter(id -> snapshotIds.contains(id) == false) .collect(Collectors.toSet()); final ListenableFuture> shardCountListener = new ListenableFuture<>(); final Collection indexMetaGenerations = snapshotIds.stream() .filter(snapshotsWithIndex::contains) - .map(id -> oldRepositoryData.indexMetaDataGenerations().indexMetaBlobId(id, indexId)) + .map(id -> originalRepositoryData.indexMetaDataGenerations().indexMetaBlobId(id, indexId)) .collect(Collectors.toSet()); final ActionListener allShardCountsListener = new GroupedActionListener<>( indexMetaGenerations.size(), @@ -1053,6 +1061,10 @@ private void writeUpdatedShardMetaDataAndComputeDeletes( } })); } + + // ----------------------------------------------------------------------------------------------------------------------------- + // Determining the shard count + shardCountListener.addListener(deleteIndexMetadataListener.delegateFailureAndWrap((delegate, counts) -> { final int shardCount = counts.stream().mapToInt(i -> i).max().orElse(0); if (shardCount == 0) { @@ -1061,24 +1073,27 @@ private void writeUpdatedShardMetaDataAndComputeDeletes( } // Listener for collecting the results of removing the snapshot from each shard's metadata in the current index final ActionListener allShardsListener = new GroupedActionListener<>(shardCount, delegate); - for (int shardId = 0; shardId < shardCount; shardId++) { - final int finalShardId = shardId; + for (int i = 0; i < shardCount; i++) { + final int shardId = i; executor.execute(new AbstractRunnable() { @Override protected void doRun() throws Exception { - final BlobContainer shardContainer = shardContainer(indexId, finalShardId); - final Set blobs = shardContainer.listBlobs(OperationPurpose.SNAPSHOT).keySet(); + final BlobContainer shardContainer = shardContainer(indexId, shardId); + final Set originalShardBlobs = shardContainer.listBlobs(OperationPurpose.SNAPSHOT).keySet(); final BlobStoreIndexShardSnapshots blobStoreIndexShardSnapshots; final long newGen; - if (useUUIDs) { + if (useShardGenerations) { newGen = -1L; blobStoreIndexShardSnapshots = buildBlobStoreIndexShardSnapshots( - blobs, + originalShardBlobs, shardContainer, - oldRepositoryData.shardGenerations().getShardGen(indexId, finalShardId) + originalRepositoryData.shardGenerations().getShardGen(indexId, shardId) ).v1(); } else { - Tuple tuple = buildBlobStoreIndexShardSnapshots(blobs, shardContainer); + Tuple tuple = buildBlobStoreIndexShardSnapshots( + originalShardBlobs, + shardContainer + ); newGen = tuple.v2() + 1; blobStoreIndexShardSnapshots = tuple.v1(); } @@ -1086,10 +1101,10 @@ protected void doRun() 
throws Exception { deleteFromShardSnapshotMeta( survivingSnapshots, indexId, - finalShardId, + shardId, snapshotIds, shardContainer, - blobs, + originalShardBlobs, blobStoreIndexShardSnapshots, newGen ) @@ -1099,12 +1114,7 @@ protected void doRun() throws Exception { @Override public void onFailure(Exception ex) { logger.warn( - () -> format( - "%s failed to delete shard data for shard [%s][%s]", - snapshotIds, - indexId.getName(), - finalShardId - ), + () -> format("%s failed to delete shard data for shard [%s][%s]", snapshotIds, indexId.getName(), shardId), ex ); // Just passing null here to count down the listener instead of failing it, the stale data left behind @@ -1117,6 +1127,120 @@ public void onFailure(Exception ex) { } } + // ----------------------------------------------------------------------------------------------------------------------------- + // Updating each shard + + /** + * Delete snapshot from shard level metadata. + * + * @param indexGeneration generation to write the new shard level level metadata to. If negative a uuid id shard generation should be + * used + */ + private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( + Set survivingSnapshots, + IndexId indexId, + int shardId, + Collection snapshotIds, + BlobContainer shardContainer, + Set originalShardBlobs, + BlobStoreIndexShardSnapshots snapshots, + long indexGeneration + ) { + // Build a list of snapshots that should be preserved + final BlobStoreIndexShardSnapshots updatedSnapshots = snapshots.withRetainedSnapshots(survivingSnapshots); + ShardGeneration writtenGeneration = null; + try { + if (updatedSnapshots.snapshots().isEmpty()) { + return new ShardSnapshotMetaDeleteResult(indexId, shardId, ShardGenerations.DELETED_SHARD_GEN, originalShardBlobs); + } else { + if (indexGeneration < 0L) { + writtenGeneration = ShardGeneration.newGeneration(); + INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedSnapshots, shardContainer, writtenGeneration.toBlobNamePart(), compress); + } else { + writtenGeneration = new ShardGeneration(indexGeneration); + writeShardIndexBlobAtomic(shardContainer, indexGeneration, updatedSnapshots, Collections.emptyMap()); + } + final Set survivingSnapshotUUIDs = survivingSnapshots.stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); + return new ShardSnapshotMetaDeleteResult( + indexId, + shardId, + writtenGeneration, + unusedBlobs(originalShardBlobs, survivingSnapshotUUIDs, updatedSnapshots) + ); + } + } catch (IOException e) { + throw new RepositoryException( + metadata.name(), + "Failed to finalize snapshot deletion " + + snapshotIds + + " with shard index [" + + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(writtenGeneration.toBlobNamePart()) + + "]", + e + ); + } + } + + // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all + // temporary blobs + private static List unusedBlobs( + Set originalShardBlobs, + Set survivingSnapshotUUIDs, + BlobStoreIndexShardSnapshots updatedSnapshots + ) { + return originalShardBlobs.stream() + .filter( + blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX) + || (blob.startsWith(SNAPSHOT_PREFIX) + && blob.endsWith(".dat") + && survivingSnapshotUUIDs.contains( + blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) + ) == false) + || (blob.startsWith(UPLOADED_DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) + || FsBlobContainer.isTempBlobName(blob) + ) + .toList(); + } + + // 
--------------------------------------------------------------------------------------------------------------------------------- + // Cleaning up dangling blobs + + /** + * Delete any dangling blobs in the repository root (i.e. {@link RepositoryData}, {@link SnapshotInfo} and {@link Metadata} blobs) + * as well as any containers for indices that are now completely unreferenced. + */ + private void cleanupUnlinkedRootAndIndicesBlobs( + Collection snapshotIds, + Map originalIndexContainers, + Map originalRootBlobs, + RepositoryData newRepositoryData, + ActionListener listener + ) { + cleanupStaleBlobs(snapshotIds, originalIndexContainers, originalRootBlobs, newRepositoryData, listener.map(ignored -> null)); + } + + private void cleanupUnlinkedShardLevelBlobs( + RepositoryData originalRepositoryData, + Collection snapshotIds, + Collection shardDeleteResults, + ActionListener listener + ) { + final Iterator filesToDelete = resolveFilesToDelete(originalRepositoryData, snapshotIds, shardDeleteResults); + if (filesToDelete.hasNext() == false) { + listener.onResponse(null); + return; + } + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { + try { + deleteFromContainer(blobContainer(), filesToDelete); + l.onResponse(null); + } catch (Exception e) { + logger.warn(() -> format("%s Failed to delete some blobs during snapshot delete", snapshotIds), e); + throw e; + } + })); + } + private Iterator resolveFilesToDelete( RepositoryData oldRepositoryData, Collection snapshotIds, @@ -3309,57 +3433,6 @@ public String toString() { return "BlobStoreRepository[" + "[" + metadata.name() + "], [" + blobStore.get() + ']' + ']'; } - /** - * Delete snapshot from shard level metadata. - * - * @param indexGeneration generation to write the new shard level level metadata to. 
If negative a uuid id shard generation should be - * used - */ - private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( - Set survivingSnapshots, - IndexId indexId, - int snapshotShardId, - Collection snapshotIds, - BlobContainer shardContainer, - Set blobs, - BlobStoreIndexShardSnapshots snapshots, - long indexGeneration - ) { - // Build a list of snapshots that should be preserved - final BlobStoreIndexShardSnapshots updatedSnapshots = snapshots.withRetainedSnapshots(survivingSnapshots); - ShardGeneration writtenGeneration = null; - try { - if (updatedSnapshots.snapshots().isEmpty()) { - return new ShardSnapshotMetaDeleteResult(indexId, snapshotShardId, ShardGenerations.DELETED_SHARD_GEN, blobs); - } else { - if (indexGeneration < 0L) { - writtenGeneration = ShardGeneration.newGeneration(); - INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedSnapshots, shardContainer, writtenGeneration.toBlobNamePart(), compress); - } else { - writtenGeneration = new ShardGeneration(indexGeneration); - writeShardIndexBlobAtomic(shardContainer, indexGeneration, updatedSnapshots, Collections.emptyMap()); - } - final Set survivingSnapshotUUIDs = survivingSnapshots.stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); - return new ShardSnapshotMetaDeleteResult( - indexId, - snapshotShardId, - writtenGeneration, - unusedBlobs(blobs, survivingSnapshotUUIDs, updatedSnapshots) - ); - } - } catch (IOException e) { - throw new RepositoryException( - metadata.name(), - "Failed to finalize snapshot deletion " - + snapshotIds - + " with shard index [" - + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(writtenGeneration.toBlobNamePart()) - + "]", - e - ); - } - } - /** * Utility for atomically writing shard level metadata to a numeric shard generation. This is only required for writing * numeric shard generations where atomic writes with fail-if-already-exists checks are useful in preventing repository corruption. @@ -3381,27 +3454,6 @@ private void writeShardIndexBlobAtomic( ); } - // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all - // temporary blobs - private static List unusedBlobs( - Set blobs, - Set survivingSnapshotUUIDs, - BlobStoreIndexShardSnapshots updatedSnapshots - ) { - return blobs.stream() - .filter( - blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX) - || (blob.startsWith(SNAPSHOT_PREFIX) - && blob.endsWith(".dat") - && survivingSnapshotUUIDs.contains( - blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) - ) == false) - || (blob.startsWith(UPLOADED_DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) - || FsBlobContainer.isTempBlobName(blob) - ) - .toList(); - } - /** * Loads information about shard snapshot */ @@ -3581,19 +3633,4 @@ public boolean hasAtomicOverwrites() { public int getReadBufferSizeInBytes() { return bufferSize; } - - /** - * The result of removing a snapshot from a shard folder in the repository. 
- * - * @param indexId Index that the snapshot was removed from - * @param shardId Shard id that the snapshot was removed from - * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more - * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation - */ - private record ShardSnapshotMetaDeleteResult( - IndexId indexId, - int shardId, - ShardGeneration newGeneration, - Collection blobsToDelete - ) {} } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java index cc3d3cd9603a5..324ad736d7248 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotDeleteListener.java @@ -12,12 +12,14 @@ public interface SnapshotDeleteListener { /** - * Invoked once a snapshot has been fully deleted from the repository. + * Invoked once the snapshots have been fully deleted from the repository, including all async cleanup operations, indicating that + * listeners waiting for the end of the deletion can now be notified. */ void onDone(); /** - * Invoked once the updated {@link RepositoryData} has been written to the repository. + * Invoked once the updated {@link RepositoryData} has been written to the repository and it is safe for the next repository operation + * to proceed. * * @param repositoryData updated repository data */ diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index ba2dca8f1083e..938fe9a74e108 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -359,8 +359,8 @@ public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { @Override public void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { listener.onFailure(new UnsupportedOperationException()); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index 89b7dd2d3cc1d..d38ff5b1f6a0e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -99,8 +99,8 @@ public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { @Override public void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { listener.onFailure(new UnsupportedOperationException()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index fa8f8099900ce..75333a82d4c1a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -302,8 +302,8 @@ public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { @Override public void deleteSnapshots( Collection snapshotIds, - long repositoryStateId, - IndexVersion repositoryMetaVersion, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener ) { listener.onFailure(new UnsupportedOperationException("Unsupported for repository of type: " + TYPE)); From 35737a50d032f26de3354be3c07809e0ede456d7 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 10 Oct 2023 23:51:52 +1100 Subject: [PATCH 113/176] [Test] Explicitly set number_of_shards to 1 (#100171) Some tests rely on the natural index order for test assertions. This works when the index has a single primary shard but fails otherwise. This PR adjusts the relevant tests so that they explicitly configure the number of shards to 1. Relates: ES-6540 --- .../test/aggregations/cardinality_metric.yml | 4 ++++ .../test/aggregations/doc_count_field.yml | 2 ++ .../test/bulk/11_dynamic_templates.yml | 4 ++++ .../test/search.highlight/40_keyword_ignore.yml | 2 ++ .../test/search/210_rescore_explain.yml | 13 ++++++++++--- .../test/search/310_match_bool_prefix.yml | 2 ++ .../java/org/elasticsearch/test/eql/DataLoader.java | 9 ++++++++- .../rest-api-spec/test/versionfield/20_scripts.yml | 2 ++ 8 files changed, 34 insertions(+), 4 deletions(-) diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/cardinality_metric.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/cardinality_metric.yml index b03fec6331168..8270a8c6b5d74 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/cardinality_metric.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/cardinality_metric.yml @@ -3,6 +3,8 @@ setup: indices.create: index: test_1 body: + settings: + number_of_shards: 1 mappings: properties: int_field: @@ -45,6 +47,8 @@ setup: indices.create: index: test_2 body: + settings: + number_of_shards: 1 mappings: properties: other_field: diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/doc_count_field.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/doc_count_field.yml index 574eba61dafbe..cbd642d3583d3 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/doc_count_field.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/doc_count_field.yml @@ -3,6 +3,8 @@ setup: indices.create: index: test_1 body: + settings: + number_of_shards: 1 mappings: properties: str: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml index e0d81e87ffbc0..348d7a6fd0ef1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml @@ -9,6 +9,8 @@ indices.create: index: test_index body: + settings: + number_of_shards: 1 mappings: dynamic_templates: - location: @@ -181,6 +183,8 @@ indices.create: index: test_index body: + settings: + number_of_shards: 1 mappings: dynamic_templates: - location: diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/40_keyword_ignore.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/40_keyword_ignore.yml index fe4f36364d2db..5849ed195265f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/40_keyword_ignore.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/40_keyword_ignore.yml @@ -4,6 +4,8 @@ setup: indices.create: index: test-index body: + settings: + number_of_shards: 1 mappings: "properties": "k1": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/210_rescore_explain.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/210_rescore_explain.yml index 4d63a81a99595..50c87747508b7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/210_rescore_explain.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/210_rescore_explain.yml @@ -1,5 +1,12 @@ --- "Score should match explanation in rescore": + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + - do: bulk: refresh: true @@ -18,16 +25,16 @@ body: explain: true query: - match_all: {} + match_all: { } rescore: window_size: 2 query: rescore_query: - match_all: {} + match_all: { } query_weight: 5 rescore_query_weight: 10 - - match: {hits.max_score: 15} + - match: { hits.max_score: 15 } - match: { hits.hits.0._score: 15 } - match: { hits.hits.0._explanation.value: 15 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/310_match_bool_prefix.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/310_match_bool_prefix.yml index e9e39ed15ea56..d0b4246ca4b86 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/310_match_bool_prefix.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/310_match_bool_prefix.yml @@ -7,6 +7,8 @@ setup: indices.create: index: test body: + settings: + number_of_shards: 1 mappings: properties: my_field1: diff --git a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java index 1cb087d7702c2..588c2d87f743d 100644 --- a/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java +++ b/x-pack/plugin/eql/qa/common/src/main/java/org/elasticsearch/test/eql/DataLoader.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.test.rest.ESRestTestCase; @@ -136,7 +137,13 @@ private static void load( } private static void createTestIndex(RestHighLevelClient client, String indexName, String mapping) throws IOException { - ESRestTestCase.createIndex(client.getLowLevelClient(), indexName, null, mapping, null); + ESRestTestCase.createIndex( + client.getLowLevelClient(), + indexName, + Settings.builder().put("number_of_shards", 1).build(), + mapping, + null + ); } /** diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/versionfield/20_scripts.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/versionfield/20_scripts.yml index 1f461cd94c987..0bfde800d8bd1 100644 
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/versionfield/20_scripts.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/versionfield/20_scripts.yml @@ -12,6 +12,8 @@ setup: indices.create: index: test_index body: + settings: + number_of_shards: 1 mappings: properties: version: From 0efd10169660a98f0284d3e943e670907ee17490 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 10 Oct 2023 05:53:03 -0700 Subject: [PATCH 114/176] Fix ClusterSearchShardsResponseTests#testSerialization (#100557) Fixes serialization random versioning. closes: #100289 --- .../ClusterSearchShardsResponseTests.java | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java index 59679cc150910..e919b4aedf38b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.action.admin.cluster.shards; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.TransportVersion; +import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.routing.ShardRouting; @@ -21,13 +21,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.query.RandomQueryBuilder; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.test.VersionUtils; import java.util.ArrayList; import java.util.Collections; @@ -37,9 +37,10 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; + public class ClusterSearchShardsResponseTests extends ESTestCase { - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100482") public void testSerialization() throws Exception { Map indicesAndFilters = new HashMap<>(); Set nodes = new HashSet<>(); @@ -51,12 +52,10 @@ public void testSerialization() throws Exception { String nodeId = randomAlphaOfLength(10); ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, randomBoolean(), ShardRoutingState.STARTED); clusterSearchShardsGroups[i] = new ClusterSearchShardsGroup(shardId, new ShardRouting[] { shardRouting }); - DiscoveryNode node = DiscoveryNodeUtils.create( - shardRouting.currentNodeId(), - new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF)), - VersionUtils.randomVersion(random()) - ); - nodes.add(node); + DiscoveryNodeUtils.Builder node = DiscoveryNodeUtils.builder(shardRouting.currentNodeId()) + .address(new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF))) + .version(randomCompatibleVersion(random(), Version.CURRENT), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()); + nodes.add(node.build()); AliasFilter aliasFilter; if (randomBoolean()) 
{ aliasFilter = AliasFilter.of(RandomQueryBuilder.createQuery(random()), "alias-" + index); From cabb29664916ca874bd5708869fa39049749a7fe Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 10 Oct 2023 16:00:56 +0200 Subject: [PATCH 115/176] Record unsafe map in live version map archive (#100248) --- .../org/elasticsearch/index/engine/LiveVersionMap.java | 4 ++-- .../elasticsearch/index/engine/LiveVersionMapArchive.java | 8 ++++++++ .../index/engine/LiveVersionMapTestUtils.java | 6 ++++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 86ab5b8edebe6..ef0901bc17712 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -88,7 +88,7 @@ int size() { return map.size(); } - boolean isUnsafe() { + public boolean isUnsafe() { return unsafe; } @@ -320,7 +320,7 @@ VersionValue getVersionForAssert(final BytesRef uid) { } boolean isUnsafe() { - return maps.current.isUnsafe() || maps.old.isUnsafe(); + return maps.current.isUnsafe() || maps.old.isUnsafe() || archive.isUnsafe(); } void enforceSafeAccess() { diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java index 67804b9b39a20..a68a1cea368d4 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java @@ -31,6 +31,14 @@ public interface LiveVersionMapArchive { */ long getMinDeleteTimestamp(); + /** + * Returns whether the archive has seen an unsafe old map (passed via {@link LiveVersionMapArchive#afterRefresh}) + * which has not yet been refreshed on the unpromotable shards. + */ + default boolean isUnsafe() { + return false; + } + LiveVersionMapArchive NOOP_ARCHIVE = new LiveVersionMapArchive() { @Override public void afterRefresh(LiveVersionMap.VersionLookup old) {} diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java index 9490ceec247e1..a32927ab13bea 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java @@ -45,6 +45,12 @@ public static void putIndex(LiveVersionMap map, String id, IndexVersionValue ver } } + public static void maybePutIndex(LiveVersionMap map, String id, IndexVersionValue version) { + try (Releasable r = acquireLock(map, uid(id))) { + map.maybePutIndexUnderLock(uid(id), version); + } + } + public static void putDelete(LiveVersionMap map, String id, DeleteVersionValue version) { try (Releasable r = acquireLock(map, uid(id))) { map.putDeleteUnderLock(uid(id), version); From a8d79b3ddc87b148a98862d46ab907618dd11643 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 10 Oct 2023 07:03:30 -0700 Subject: [PATCH 116/176] Use separate search execution context for each pipeline (#100563) Synthetic source doesn't seem to work correctly with either inter-segment or intra-segment parallelism. Neither of these parallelisms were available when the synthetic source was developed. The new test fails with the doc or segment data_partitioning. 
While we are working on a proper fix, this PR introduces a workaround by creating a separate search execution context for each execution pipeline, restoring the sequential execution invariants. I believe that the overhead added by this workaround is minimal. --- x-pack/plugin/esql/build.gradle | 1 + .../compute/lucene/ValueSources.java | 16 ++++- .../lucene/ValuesSourceReaderOperator.java | 5 +- .../operator/OrdinalsGroupingOperator.java | 13 +++- .../ValuesSourceReaderOperatorTests.java | 2 +- .../xpack/esql/action/SyntheticSourceIT.java | 69 +++++++++++++++++++ .../planner/EsPhysicalOperationProviders.java | 20 +++--- 7 files changed, 111 insertions(+), 15 deletions(-) create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index a21c3d0990333..9643e2b2d8e1e 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -34,6 +34,7 @@ dependencies { testImplementation('org.webjars.npm:fontsource__roboto-mono:4.5.7') internalClusterTestImplementation project(":client:rest-high-level") + internalClusterTestImplementation project(":modules:mapper-extras") } /* diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java index b7eb47a7a52d3..e5ce5436990b7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.support.FieldContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; import java.io.IOException; import java.util.ArrayList; @@ -49,7 +50,20 @@ public static List sources( List sources = new ArrayList<>(searchContexts.size()); for (SearchContext searchContext : searchContexts) { - SearchExecutionContext ctx = searchContext.getSearchExecutionContext(); + // TODO: remove this workaround + // Create a separate SearchExecutionContext for each ValuesReader, as it seems that + // the synthetic source doesn't work properly with inter-segment or intra-segment parallelism. 
+ ShardSearchRequest shardRequest = searchContext.request(); + SearchExecutionContext ctx = searchContext.readerContext() + .indexService() + .newSearchExecutionContext( + shardRequest.shardId().id(), + shardRequest.shardRequestIndex(), + searchContext.searcher(), + shardRequest::nowInMillis, + shardRequest.getClusterAlias(), + shardRequest.getRuntimeMappings() + ); var fieldType = ctx.getFieldType(fieldName); if (fieldType == null) { sources.add(new ValueSourceInfo(new NullValueSourceType(), new NullValueSource(), elementType, ctx.getIndexReader())); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index b3ac80ee099b7..83fc902bd5077 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -28,6 +28,7 @@ import java.util.Map; import java.util.Objects; import java.util.TreeMap; +import java.util.function.Supplier; /** * Operator that extracts doc_values from a Lucene index out of pages that have been produced by {@link LuceneSourceOperator} @@ -42,12 +43,12 @@ public class ValuesSourceReaderOperator extends AbstractPageMappingOperator { * @param docChannel the channel containing the shard, leaf/segment and doc id * @param field the lucene field being loaded */ - public record ValuesSourceReaderOperatorFactory(List sources, int docChannel, String field) + public record ValuesSourceReaderOperatorFactory(Supplier> sources, int docChannel, String field) implements OperatorFactory { @Override public Operator get(DriverContext driverContext) { - return new ValuesSourceReaderOperator(sources, docChannel, field); + return new ValuesSourceReaderOperator(sources.get(), docChannel, field); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 1c068815f1aae..4dab7faa2a074 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -42,6 +42,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; import static java.util.Objects.requireNonNull; import static java.util.stream.Collectors.joining; @@ -51,7 +52,7 @@ */ public class OrdinalsGroupingOperator implements Operator { public record OrdinalsGroupingOperatorFactory( - List sources, + Supplier> sources, int docChannel, String groupingField, List aggregators, @@ -61,7 +62,15 @@ public record OrdinalsGroupingOperatorFactory( @Override public Operator get(DriverContext driverContext) { - return new OrdinalsGroupingOperator(sources, docChannel, groupingField, aggregators, maxPageSize, bigArrays, driverContext); + return new OrdinalsGroupingOperator( + sources.get(), + docChannel, + groupingField, + aggregators, + maxPageSize, + bigArrays, + driverContext + ); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index 3ce202c0e4608..ec1697e9aedd2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -109,7 +109,7 @@ static Operator.OperatorFactory factory(IndexReader reader, ValuesSourceType vsT FieldContext fc = new FieldContext(ft.name(), fd, ft); ValuesSource vs = vsType.getField(fc, null); return new ValuesSourceReaderOperator.ValuesSourceReaderOperatorFactory( - List.of(new ValueSourceInfo(vsType, vs, elementType, reader)), + () -> List.of(new ValueSourceInfo(vsType, vs, elementType, reader)), 0, ft.name() ); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java new file mode 100644 index 0000000000000..f0365ce78f44a --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/SyntheticSourceIT.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class SyntheticSourceIT extends AbstractEsqlIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + var plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(MapperExtrasPlugin.class); + return plugins; + } + + public void testMatchOnlyText() throws Exception { + XContentBuilder mapping = JsonXContent.contentBuilder(); + mapping.startObject(); + if (true || randomBoolean()) { + mapping.startObject("_source"); + mapping.field("mode", "synthetic"); + mapping.endObject(); + } + { + mapping.startObject("properties"); + mapping.startObject("uid"); + mapping.field("type", "keyword"); + mapping.endObject(); + mapping.startObject("name"); + mapping.field("type", "match_only_text"); + mapping.endObject(); + mapping.endObject(); + } + mapping.endObject(); + + assertAcked(client().admin().indices().prepareCreate("test").setMapping(mapping)); + + int numDocs = between(10, 1000); + for (int i = 0; i < numDocs; i++) { + IndexRequestBuilder indexRequest = client().prepareIndex("test").setSource("uid", "u" + i); + if (randomInt(100) < 5) { + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + } + indexRequest.get(); + } + client().admin().indices().prepareRefresh("test").get(); + try (EsqlQueryResponse resp = run("from test | keep uid, name | sort uid asc | limit 1")) { + Iterator<Object> row = resp.values().next(); + assertThat(row.next(), equalTo("u0")); + assertNull(row.next()); + } + } +} diff
--git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index ce5e277deaad8..3131b8c8c1e20 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -15,6 +15,7 @@ import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; +import org.elasticsearch.compute.lucene.ValueSourceInfo; import org.elasticsearch.compute.lucene.ValueSources; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.Operator; @@ -39,10 +40,12 @@ import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.FieldAttribute; +import org.elasticsearch.xpack.ql.type.DataType; import java.util.ArrayList; import java.util.List; import java.util.function.Function; +import java.util.function.Supplier; import static org.elasticsearch.common.lucene.search.Queries.newNonNestedFilter; import static org.elasticsearch.compute.lucene.LuceneSourceOperator.NO_LIMIT; @@ -74,19 +77,18 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi layout.append(attr); Layout previousLayout = op.layout; - var sources = ValueSources.sources( + DataType dataType = attr.dataType(); + String fieldName = attr.name(); + Supplier<List<ValueSourceInfo>> sources = () -> ValueSources.sources( searchContexts, - attr.name(), - EsqlDataTypes.isUnsupported(attr.dataType()), - LocalExecutionPlanner.toElementType(attr.dataType()) + fieldName, + EsqlDataTypes.isUnsupported(dataType), + LocalExecutionPlanner.toElementType(dataType) ); int docChannel = previousLayout.get(sourceAttr.id()).channel(); - op = op.with( - new ValuesSourceReaderOperator.ValuesSourceReaderOperatorFactory(sources, docChannel, attr.name()), - layout.build() - ); + op = op.with(new ValuesSourceReaderOperator.ValuesSourceReaderOperatorFactory(sources, docChannel, fieldName), layout.build()); } return op; } @@ -173,7 +175,7 @@ public final Operator.OperatorFactory ordinalGroupingOperatorFactory( // The grouping-by values are ready, let's group on them directly. // Costin: why are they ready and not already exposed in the layout? return new OrdinalsGroupingOperator.OrdinalsGroupingOperatorFactory( - ValueSources.sources( + () -> ValueSources.sources( searchContexts, attrSource.name(), EsqlDataTypes.isUnsupported(attrSource.dataType()), From f313479490e1a221c40bea6909530b6cb1055cc2 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 10 Oct 2023 07:04:38 -0700 Subject: [PATCH 117/176] Allow VectorFixedBuilder to become released without build (#100567) VectorFixedBuilder can be closed without calling build and we should move its state to released in this case.
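The essence of the fix is that `nextIndex` doubles as a release sentinel: `build()` already sets it to -1 once the built vector takes ownership of the pre-adjusted breaker bytes, and `close()` now does the same, so a builder that is closed without ever being built hands the bytes back exactly once even if `close()` runs again. A minimal standalone sketch of the pattern (the `LongConsumer` stands in for the block factory's breaker accounting and is not the real API):

    import java.util.function.LongConsumer;

    final class FixedBuilderSketch implements AutoCloseable {
        private final long preAdjustedBytes;
        private final LongConsumer adjustBreaker; // negative values hand bytes back
        private int nextIndex; // becomes -1 once built or closed

        FixedBuilderSketch(long preAdjustedBytes, LongConsumer adjustBreaker) {
            this.preAdjustedBytes = preAdjustedBytes;
            this.adjustBreaker = adjustBreaker;
            adjustBreaker.accept(preAdjustedBytes); // reserve everything up front
        }

        long build() {
            if (nextIndex < 0) {
                throw new IllegalStateException("already built or closed");
            }
            nextIndex = -1; // the built vector now owns the reserved bytes
            return preAdjustedBytes;
        }

        @Override
        public void close() {
            if (nextIndex >= 0) { // never built: release the reservation once
                nextIndex = -1;   // without this line a second close() would release again
                adjustBreaker.accept(-preAdjustedBytes);
            }
        }
    }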
--- .../elasticsearch/compute/data/BooleanVectorFixedBuilder.java | 1 + .../org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java | 1 + .../org/elasticsearch/compute/data/IntVectorFixedBuilder.java | 1 + .../org/elasticsearch/compute/data/LongVectorFixedBuilder.java | 1 + .../org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st | 1 + 5 files changed, 5 insertions(+) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java index 93ff57f2336bd..33daff853eecb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorFixedBuilder.java @@ -69,6 +69,7 @@ public BooleanVector build() { public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector + nextIndex = -1; blockFactory.adjustBreaker(-preAdjustedBytes, false); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java index aa698a86b9c4e..7353515e8ffd8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorFixedBuilder.java @@ -69,6 +69,7 @@ public DoubleVector build() { public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector + nextIndex = -1; blockFactory.adjustBreaker(-preAdjustedBytes, false); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java index 3ee3bfb40d6fa..a4755addf0b16 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorFixedBuilder.java @@ -69,6 +69,7 @@ public IntVector build() { public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector + nextIndex = -1; blockFactory.adjustBreaker(-preAdjustedBytes, false); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java index c95b0d07bec55..4a11012e769d8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorFixedBuilder.java @@ -69,6 +69,7 @@ public LongVector build() { public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector + nextIndex = -1; blockFactory.adjustBreaker(-preAdjustedBytes, false); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st index 69993d0945fe3..dfe5bb7622b2a 100644 --- 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorFixedBuilder.java.st @@ -69,6 +69,7 @@ final class $Type$VectorFixedBuilder implements $Type$Vector.FixedBuilder { public void close() { if (nextIndex >= 0) { // If nextIndex < 0 we've already built the vector + nextIndex = -1; blockFactory.adjustBreaker(-preAdjustedBytes, false); } } From b474753b5b71c8cffa10c07108e33f202755e23a Mon Sep 17 00:00:00 2001 From: tmgordeeva Date: Tue, 10 Oct 2023 07:37:07 -0700 Subject: [PATCH 118/176] Test PR for removing replica/shard settings (#100474) Running through CI to see if we still get failures based on replica/shard settings in these tests. --- .../test/aggregate-metrics/110_field_caps.yml | 8 -------- .../test/aggregate-metrics/90_tsdb_mappings.yml | 9 --------- 2 files changed, 17 deletions(-) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml index abf367043d9c8..0c765e39656c7 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/110_field_caps.yml @@ -9,8 +9,6 @@ setup: body: settings: index: - number_of_replicas: 0 - number_of_shards: 2 mode: time_series routing_path: [ metricset, k8s.pod.uid ] time_series: @@ -35,8 +33,6 @@ setup: body: settings: index: - number_of_replicas: 0 - number_of_shards: 2 mode: time_series routing_path: [ metricset, k8s.pod.uid ] time_series: @@ -57,10 +53,6 @@ setup: indices.create: index: test_non_time_series body: - settings: - index: - number_of_replicas: 0 - number_of_shards: 2 mappings: properties: "@timestamp": diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/90_tsdb_mappings.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/90_tsdb_mappings.yml index 05a2c640e68ef..2325a078764fc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/90_tsdb_mappings.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/90_tsdb_mappings.yml @@ -7,10 +7,6 @@ aggregate_double_metric with time series mappings: indices.create: index: test_index body: - settings: - index: - number_of_replicas: 0 - number_of_shards: 2 mappings: properties: "@timestamp": @@ -51,10 +47,6 @@ aggregate_double_metric with wrong time series mappings: indices.create: index: tsdb_index body: - settings: - index: - number_of_replicas: 0 - number_of_shards: 2 mappings: properties: "@timestamp": @@ -95,7 +87,6 @@ aggregate_double_metric with wrong time series mappings: index: tsdb-fieldcap body: settings: - number_of_replicas: 0 mode: time_series routing_path: [field1] time_series: From 438b246709f7f06677c92e4db9f34971eda5ade1 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Tue, 10 Oct 2023 09:46:17 -0500 Subject: [PATCH 119/176] Excluding 8.10.3 from the RepositoryData BWC test (#100605) This excludes 8.10.3 from the RepositoryData BWC test, related to #100447. 
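For context on why `-da` is sufficient: the bogus 8.10.x check is a plain Java `assert`, and assertion expressions are compiled in but only evaluated when the JVM enables them, so the old nodes can simply be started with assertions disabled instead of patching the released binaries. A toy illustration of the mechanism (the check below is hypothetical, not the actual RepositoryData code):

    final class AssertDemo {
        static void readGeneration(long writtenByVersionId, long currentVersionId) {
            // With -ea this trips for repositories touched by newer versions;
            // with -da the expression is never evaluated and reading proceeds.
            assert writtenByVersionId <= currentVersionId
                : "repository touched by newer version " + writtenByVersionId;
        }

        public static void main(String[] args) {
            readGeneration(8_110_099L, 8_100_299L); // fails only when run with -ea
        }
    }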
--- qa/repository-multi-version/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index 80d316536e09e..8398e3b8aeb1a 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -29,7 +29,7 @@ BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> numberOfNodes = 2 setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' - if (v.equals('8.10.0') || v.equals('8.10.1') || v.equals('8.10.2')) { + if (v.equals('8.10.0') || v.equals('8.10.1') || v.equals('8.10.2') || v.equals('8.10.3')) { // 8.10.x versions contain a bogus assertion that trips when reading repositories touched by newer versions // see https://github.com/elastic/elasticsearch/issues/98454 for details jvmArgs '-da' From c8ca0d1c611f228ecd282b4d9d5e7b59385930a4 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Tue, 10 Oct 2023 16:08:13 +0100 Subject: [PATCH 120/176] ESQL Fix array Block ramBytesUsed (#100578) This commit removes the MvOrdering enum from array Block ramBytesUsed. The enum value is shared. --- .../org/elasticsearch/compute/data/BooleanArrayBlock.java | 3 +-- .../org/elasticsearch/compute/data/BytesRefArrayBlock.java | 3 +-- .../org/elasticsearch/compute/data/DoubleArrayBlock.java | 3 +-- .../org/elasticsearch/compute/data/IntArrayBlock.java | 3 +-- .../org/elasticsearch/compute/data/LongArrayBlock.java | 3 +-- .../org/elasticsearch/compute/data/X-ArrayBlock.java.st | 3 +-- .../elasticsearch/compute/data/BlockAccountingTests.java | 2 ++ .../esql/expression/function/AbstractFunctionTestCase.java | 2 +- .../expression/function/scalar/nulls/CoalesceTests.java | 7 ------- 9 files changed, 9 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index adf1282c21fb0..9a66bf00fa71f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -93,8 +93,7 @@ public BooleanBlock expand() { public static long ramBytesEstimated(boolean[] values, int[] firstValueIndexes, BitSet nullsMask) { return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); - // TODO mvordering is shared + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index f46615307f767..9e6631b6807c6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -96,8 +96,7 @@ public BytesRefBlock expand() { public static long ramBytesEstimated(BytesRefArray values, int[] firstValueIndexes, BitSet nullsMask) { return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + 
BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); - // TODO mvordering is shared + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index b0d77dd71271e..f9e1fe0c6e199 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -93,8 +93,7 @@ public DoubleBlock expand() { public static long ramBytesEstimated(double[] values, int[] firstValueIndexes, BitSet nullsMask) { return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); - // TODO mvordering is shared + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 97791a03c6044..95344bd8367c0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -93,8 +93,7 @@ public IntBlock expand() { public static long ramBytesEstimated(int[] values, int[] firstValueIndexes, BitSet nullsMask) { return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); - // TODO mvordering is shared + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index dddc5296e471e..a45abb1ed9248 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -93,8 +93,7 @@ public LongBlock expand() { public static long ramBytesEstimated(long[] values, int[] firstValueIndexes, BitSet nullsMask) { return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); - // TODO mvordering is shared + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 1f9fb93bc65c6..6a8185b43ecab 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -114,8 +114,7 @@ $endif$ public static 
long ramBytesEstimated($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int[] firstValueIndexes, BitSet nullsMask) { return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); - // TODO mvordering is shared + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java index bb1cd019273ed..c8364141d8377 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java @@ -252,6 +252,8 @@ public long accumulateObject(Object o, long shallowSize, Map fiel } else { queue.add(entry.getValue()); } + } else if (o instanceof AbstractArrayBlock && entry.getValue() instanceof Block.MvOrdering) { + // skip; MvOrdering is an enum, so instances are shared } else { queue.add(entry.getValue()); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index cdff3f0b5f2ca..3a6479215f479 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -420,7 +420,7 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo assertTrue("argument " + nullBlock + " is null", value.isNull(0)); } - public void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { + public final void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { assumeTrue("nothing to do if a type error", testCase.getExpectedTypeError() == null); assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); int count = 10_000; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java index 15d37acbccfcb..8db6b1bbd0c93 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java @@ -28,7 +28,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutionException; import java.util.function.Function; import java.util.function.Supplier; @@ -54,12 +53,6 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(builder.suppliers()); } - @Override - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100559") - public void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { - super.testEvaluateInManyThreads(); - } - @Override protected void assertSimpleWithNulls(List data, Block value, int nullBlock) { for (int i = 0; i < data.size(); i++) { From 
99f0b5cb995c3b2a1abaa65461e4f66c66b6ee12 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Tue, 10 Oct 2023 16:08:50 +0100 Subject: [PATCH 121/176] Fix NPE in DocVector ramBytesUsed (#100575) This commit fixes a potential NPE in DocVector::ramBytesUsed. --- .../org/elasticsearch/compute/data/DocVector.java | 6 +++++- .../elasticsearch/compute/data/DocVectorTests.java | 11 +++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java index 8abf0678593ec..44819359e8e44 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java @@ -203,6 +203,10 @@ public boolean equals(Object obj) { return shards.equals(other.shards) && segments.equals(other.segments) && docs.equals(other.docs); } + private static long ramBytesOrZero(int[] array) { + return array == null ? 0 : RamUsageEstimator.shallowSizeOf(array); + } + public static long ramBytesEstimated( IntVector shards, IntVector segments, @@ -211,7 +215,7 @@ public static long ramBytesEstimated( int[] shardSegmentDocMapBackwards ) { return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(shards) + RamUsageEstimator.sizeOf(segments) + RamUsageEstimator.sizeOf(docs) - + RamUsageEstimator.shallowSizeOf(shardSegmentDocMapForwards) + RamUsageEstimator.shallowSizeOf(shardSegmentDocMapBackwards); + + ramBytesOrZero(shardSegmentDocMapForwards) + ramBytesOrZero(shardSegmentDocMapBackwards); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java index 350425840a598..e2eff15fcb769 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java @@ -150,6 +150,17 @@ public void testCannotDoubleRelease() { assertThat(e.getMessage(), containsString("can't build page out of released blocks")); } + public void testRamBytesUsedWithout() { + DocVector docs = new DocVector( + IntBlock.newConstantBlockWith(0, 1).asVector(), + IntBlock.newConstantBlockWith(0, 1).asVector(), + IntBlock.newConstantBlockWith(0, 1).asVector(), + false + ); + assertThat(docs.singleSegmentNonDecreasing(), is(false)); + docs.ramBytesUsed(); // ensure non-singleSegmentNonDecreasing handles nulls in ramBytesUsed + } + IntVector intRange(int startInclusive, int endExclusive) { return IntVector.range(startInclusive, endExclusive, BlockFactory.getNonBreakingInstance()); } From 07f6524bd55371744ba9c6ddc650e75dffbb53c2 Mon Sep 17 00:00:00 2001 From: Jonathan Buttner <56361221+jonathan-buttner@users.noreply.github.com> Date: Tue, 10 Oct 2023 11:13:28 -0400 Subject: [PATCH 122/176] [ML] Refactoring inference HTTP Client to allow dynamic settings updates (#100541) * Refactoring to allow for settings updating * Allowing requests to queue * Testing getHttpClient --- .../xpack/inference/InferencePlugin.java | 49 +++-- .../inference/external/http/HttpClient.java | 55 ++---- .../external/http/HttpClientManager.java | 170 ++++++++++++++++++ .../inference/external/http/HttpSettings.java | 68 +------ .../external/http/IdleConnectionEvictor.java | 15 +-
.../external/http/HttpClientManagerTests.java | 136 ++++++++++++++ .../external/http/HttpClientTests.java | 53 +++--- 7 files changed, 395 insertions(+), 151 deletions(-) create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClientManager.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 25439d0bfc930..cc84a5c53c81c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -50,7 +50,7 @@ import org.elasticsearch.xpack.inference.action.TransportGetInferenceModelAction; import org.elasticsearch.xpack.inference.action.TransportInferenceAction; import org.elasticsearch.xpack.inference.action.TransportPutInferenceModelAction; -import org.elasticsearch.xpack.inference.external.http.HttpClient; +import org.elasticsearch.xpack.inference.external.http.HttpClientManager; import org.elasticsearch.xpack.inference.external.http.HttpSettings; import org.elasticsearch.xpack.inference.registry.ModelRegistry; import org.elasticsearch.xpack.inference.rest.RestDeleteInferenceModelAction; @@ -62,13 +62,16 @@ import java.util.Collection; import java.util.List; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; public class InferencePlugin extends Plugin implements ActionPlugin, InferenceServicePlugin, SystemIndexPlugin { public static final String NAME = "inference"; public static final String UTILITY_THREAD_POOL_NAME = "inference_utility"; + public static final String HTTP_CLIENT_SENDER_THREAD_POOL_NAME = "inference_http_client_sender"; private final Settings settings; - private final SetOnce httpClient = new SetOnce<>(); + private final SetOnce httpClientManager = new SetOnce<>(); public InferencePlugin(Settings settings) { this.settings = settings; @@ -119,8 +122,7 @@ public Collection createComponents( AllocationService allocationService, IndicesService indicesService ) { - var httpSettings = new HttpSettings(settings, clusterService); - httpClient.set(HttpClient.create(httpSettings, threadPool)); + httpClientManager.set(HttpClientManager.create(settings, threadPool, clusterService)); ModelRegistry modelRegistry = new ModelRegistry(client); return List.of(modelRegistry); @@ -154,22 +156,35 @@ public Collection getSystemIndexDescriptors(Settings sett } @Override - public List> getExecutorBuilders(Settings unused) { - ScalingExecutorBuilder utility = new ScalingExecutorBuilder( - UTILITY_THREAD_POOL_NAME, - 0, - 1, - TimeValue.timeValueMinutes(10), - false, - "xpack.inference.utility_thread_pool" + public List> getExecutorBuilders(Settings settingsToUse) { + return List.of( + new ScalingExecutorBuilder( + UTILITY_THREAD_POOL_NAME, + 0, + 1, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.utility_thread_pool" + ), + /* + * This executor is specifically for enqueuing requests to be sent. The underlying + * connection pool used by the http client will block if there are no available connections to lease. 
+ * See here for more info: https://hc.apache.org/httpcomponents-client-4.5.x/current/tutorial/html/connmgmt.html + */ + new ScalingExecutorBuilder( + HTTP_CLIENT_SENDER_THREAD_POOL_NAME, + 0, + 1, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.http_client_sender_thread_pool" + ) ); - - return List.of(utility); } @Override public List> getSettings() { - return HttpSettings.getSettings(); + return Stream.concat(HttpSettings.getSettings().stream(), HttpClientManager.getSettings().stream()).collect(Collectors.toList()); } @Override @@ -194,8 +209,8 @@ public List getInferenceServiceNamedWriteables() { @Override public void close() { - if (httpClient.get() != null) { - IOUtils.closeWhileHandlingException(httpClient.get()); + if (httpClientManager.get() != null) { + IOUtils.closeWhileHandlingException(httpClientManager.get()); } } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java index 5e3ceac875921..5622ac51ba187 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpClient.java @@ -13,9 +13,6 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; -import org.apache.http.impl.nio.reactor.DefaultConnectingIOReactor; -import org.apache.http.nio.reactor.ConnectingIOReactor; -import org.apache.http.nio.reactor.IOReactorException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -29,6 +26,7 @@ import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.InferencePlugin.HTTP_CLIENT_SENDER_THREAD_POOL_NAME; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; public class HttpClient implements Closeable { @@ -41,45 +39,19 @@ enum Status { } private final CloseableHttpAsyncClient client; - private final IdleConnectionEvictor connectionEvictor; private final AtomicReference status = new AtomicReference<>(Status.CREATED); private final ThreadPool threadPool; private final HttpSettings settings; - public static HttpClient create(HttpSettings settings, ThreadPool threadPool) { - PoolingNHttpClientConnectionManager connectionManager = createConnectionManager(); - IdleConnectionEvictor connectionEvictor = new IdleConnectionEvictor( - threadPool, - connectionManager, - settings.getEvictionInterval(), - settings.getEvictionMaxIdle() - ); + public static HttpClient create(HttpSettings settings, ThreadPool threadPool, PoolingNHttpClientConnectionManager connectionManager) { + CloseableHttpAsyncClient client = createAsyncClient(connectionManager); - int maxConnections = settings.getMaxConnections(); - CloseableHttpAsyncClient client = createAsyncClient(connectionManager, maxConnections); - - return new HttpClient(settings, client, connectionEvictor, threadPool); - } - - private static PoolingNHttpClientConnectionManager createConnectionManager() { - ConnectingIOReactor ioReactor; - try { - ioReactor = new DefaultConnectingIOReactor(); - } catch (IOReactorException e) { - var message = "Failed to initialize the 
inference http client"; - logger.error(message, e); - throw new ElasticsearchException(message, e); - } - - return new PoolingNHttpClientConnectionManager(ioReactor); + return new HttpClient(settings, client, threadPool); } - private static CloseableHttpAsyncClient createAsyncClient(PoolingNHttpClientConnectionManager connectionManager, int maxConnections) { + private static CloseableHttpAsyncClient createAsyncClient(PoolingNHttpClientConnectionManager connectionManager) { HttpAsyncClientBuilder clientBuilder = HttpAsyncClientBuilder.create(); - clientBuilder.setConnectionManager(connectionManager); - clientBuilder.setMaxConnPerRoute(maxConnections); - clientBuilder.setMaxConnTotal(maxConnections); // The apache client will be shared across all connections because it can be expensive to create it // so we don't want to support cookies to avoid accidental authentication for unauthorized users clientBuilder.disableCookieManagement(); @@ -88,24 +60,32 @@ private static CloseableHttpAsyncClient createAsyncClient(PoolingNHttpClientConn } // Default for testing - HttpClient(HttpSettings settings, CloseableHttpAsyncClient asyncClient, IdleConnectionEvictor evictor, ThreadPool threadPool) { + HttpClient(HttpSettings settings, CloseableHttpAsyncClient asyncClient, ThreadPool threadPool) { this.settings = settings; this.threadPool = threadPool; this.client = asyncClient; - this.connectionEvictor = evictor; } public void start() { if (status.compareAndSet(Status.CREATED, Status.STARTED)) { client.start(); - connectionEvictor.start(); } } - public void send(HttpUriRequest request, ActionListener listener) throws IOException { + public void send(HttpUriRequest request, ActionListener listener) { // The caller must call start() first before attempting to send a request assert status.get() == Status.STARTED; + threadPool.executor(HTTP_CLIENT_SENDER_THREAD_POOL_NAME).execute(() -> { + try { + doPrivilegedSend(request, listener); + } catch (IOException e) { + listener.onFailure(new ElasticsearchException(format("Failed to send request [%s]", request.getRequestLine()), e)); + } + }); + } + + private void doPrivilegedSend(HttpUriRequest request, ActionListener listener) throws IOException { SocketAccess.doPrivileged(() -> client.execute(request, new FutureCallback<>() { @Override public void completed(HttpResponse response) { @@ -144,6 +124,5 @@ private void failUsingUtilityThread(Exception exception, ActionListener MAX_CONNECTIONS = Setting.intSetting( + "xpack.inference.http.max_connections", + // TODO pick a reasonable values here + 20, + 1, + 1000, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private static final TimeValue DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME = TimeValue.timeValueSeconds(10); + static final Setting CONNECTION_EVICTION_THREAD_INTERVAL_SETTING = Setting.timeSetting( + "xpack.inference.http.connection_eviction_interval", + DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private static final TimeValue DEFAULT_CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING = DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME; + static final Setting CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING = Setting.timeSetting( + "xpack.inference.http.connection_eviction_max_idle_time", + DEFAULT_CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private final ThreadPool threadPool; + private final PoolingNHttpClientConnectionManager connectionManager; + private 
EvictorSettings evictorSettings; + private IdleConnectionEvictor connectionEvictor; + private final HttpClient httpClient; + + public static HttpClientManager create(Settings settings, ThreadPool threadPool, ClusterService clusterService) { + PoolingNHttpClientConnectionManager connectionManager = createConnectionManager(); + return new HttpClientManager(settings, connectionManager, threadPool, clusterService); + } + + // Default for testing + HttpClientManager( + Settings settings, + PoolingNHttpClientConnectionManager connectionManager, + ThreadPool threadPool, + ClusterService clusterService + ) { + this.threadPool = threadPool; + + this.connectionManager = connectionManager; + setMaxConnections(MAX_CONNECTIONS.get(settings)); + + this.httpClient = HttpClient.create(new HttpSettings(settings, clusterService), threadPool, connectionManager); + + evictorSettings = new EvictorSettings(settings); + connectionEvictor = createConnectionEvictor(); + + this.addSettingsUpdateConsumers(clusterService); + } + + private static PoolingNHttpClientConnectionManager createConnectionManager() { + ConnectingIOReactor ioReactor; + try { + ioReactor = new DefaultConnectingIOReactor(); + } catch (IOReactorException e) { + var message = "Failed to initialize the inference http client manager"; + logger.error(message, e); + throw new ElasticsearchException(message, e); + } + + return new PoolingNHttpClientConnectionManager(ioReactor); + } + + private void addSettingsUpdateConsumers(ClusterService clusterService) { + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_CONNECTIONS, this::setMaxConnections); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CONNECTION_EVICTION_THREAD_INTERVAL_SETTING, this::setEvictionInterval); + clusterService.getClusterSettings().addSettingsUpdateConsumer(CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING, this::setEvictionMaxIdle); + } + + private IdleConnectionEvictor createConnectionEvictor() { + return new IdleConnectionEvictor(threadPool, connectionManager, evictorSettings.evictionInterval, evictorSettings.evictionMaxIdle); + } + + public static List> getSettings() { + return List.of(MAX_CONNECTIONS, CONNECTION_EVICTION_THREAD_INTERVAL_SETTING, CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING); + } + + public void start() { + httpClient.start(); + connectionEvictor.start(); + } + + public HttpClient getHttpClient() { + return httpClient; + } + + @Override + public void close() throws IOException { + httpClient.close(); + connectionEvictor.stop(); + } + + private void setMaxConnections(int maxConnections) { + connectionManager.setMaxTotal(maxConnections); + connectionManager.setDefaultMaxPerRoute(maxConnections); + } + + // default for testing + void setEvictionInterval(TimeValue evictionInterval) { + evictorSettings = new EvictorSettings(evictionInterval, evictorSettings.evictionMaxIdle); + + connectionEvictor.stop(); + connectionEvictor = createConnectionEvictor(); + connectionEvictor.start(); + } + + void setEvictionMaxIdle(TimeValue evictionMaxIdle) { + evictorSettings = new EvictorSettings(evictorSettings.evictionInterval, evictionMaxIdle); + + connectionEvictor.stop(); + connectionEvictor = createConnectionEvictor(); + connectionEvictor.start(); + } + + private static class EvictorSettings { + private final TimeValue evictionInterval; + private final TimeValue evictionMaxIdle; + + EvictorSettings(Settings settings) { + this.evictionInterval = CONNECTION_EVICTION_THREAD_INTERVAL_SETTING.get(settings); + this.evictionMaxIdle = 
CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING.get(settings); + } + + EvictorSettings(TimeValue evictionInterval, TimeValue evictionMaxIdle) { + this.evictionInterval = evictionInterval; + this.evictionMaxIdle = evictionMaxIdle; + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java index 420f7822df06c..07d998dff956e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/HttpSettings.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.core.TimeValue; import java.util.List; @@ -26,89 +25,24 @@ public class HttpSettings { Setting.Property.NodeScope, Setting.Property.Dynamic ); - static final Setting MAX_CONNECTIONS = Setting.intSetting( - "xpack.inference.http.max_connections", - 500, - 1, - // TODO pick a reasonable value here - 1000, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - private static final TimeValue DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME = TimeValue.timeValueSeconds(10); - - static final Setting CONNECTION_EVICTION_THREAD_INTERVAL_SETTING = Setting.timeSetting( - "xpack.inference.http.connection_eviction_interval", - DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - private static final TimeValue DEFAULT_CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING = DEFAULT_CONNECTION_EVICTION_THREAD_INTERVAL_TIME; - static final Setting CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING = Setting.timeSetting( - "xpack.inference.http.connection_eviction_max_idle_time", - DEFAULT_CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); private volatile ByteSizeValue maxResponseSize; - private volatile int maxConnections; - private volatile TimeValue evictionInterval; - private volatile TimeValue evictionMaxIdle; public HttpSettings(Settings settings, ClusterService clusterService) { this.maxResponseSize = MAX_HTTP_RESPONSE_SIZE.get(settings); - this.maxConnections = MAX_CONNECTIONS.get(settings); - this.evictionInterval = CONNECTION_EVICTION_THREAD_INTERVAL_SETTING.get(settings); - this.evictionMaxIdle = CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_HTTP_RESPONSE_SIZE, this::setMaxResponseSize); - clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_CONNECTIONS, this::setMaxConnections); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(CONNECTION_EVICTION_THREAD_INTERVAL_SETTING, this::setEvictionInterval); - clusterService.getClusterSettings().addSettingsUpdateConsumer(CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING, this::setEvictionMaxIdle); } public ByteSizeValue getMaxResponseSize() { return maxResponseSize; } - public int getMaxConnections() { - return maxConnections; - } - - public TimeValue getEvictionInterval() { - return evictionInterval; - } - - public TimeValue getEvictionMaxIdle() { - return evictionMaxIdle; - } - private void setMaxResponseSize(ByteSizeValue maxResponseSize) { this.maxResponseSize = maxResponseSize; } - private void setMaxConnections(int maxConnections) { - 
this.maxConnections = maxConnections; - } - - private void setEvictionInterval(TimeValue evictionInterval) { - this.evictionInterval = evictionInterval; - } - - private void setEvictionMaxIdle(TimeValue evictionMaxIdle) { - this.evictionMaxIdle = evictionMaxIdle; - } - public static List> getSettings() { - return List.of( - MAX_HTTP_RESPONSE_SIZE, - MAX_CONNECTIONS, - CONNECTION_EVICTION_THREAD_INTERVAL_SETTING, - CONNECTION_EVICTION_MAX_IDLE_TIME_SETTING - ); + return List.of(MAX_HTTP_RESPONSE_SIZE); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java index 3ea0bc04848e0..f326661adc6f4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictor.java @@ -16,6 +16,7 @@ import java.util.Objects; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; @@ -36,7 +37,7 @@ public class IdleConnectionEvictor { private final NHttpClientConnectionManager connectionManager; private final TimeValue sleepTime; private final TimeValue maxIdleTime; - private Scheduler.Cancellable cancellableTask; + private final AtomicReference cancellableTask = new AtomicReference<>(); public IdleConnectionEvictor( ThreadPool threadPool, @@ -51,13 +52,13 @@ public IdleConnectionEvictor( } public synchronized void start() { - if (cancellableTask == null) { + if (cancellableTask.get() == null) { startInternal(); } } private void startInternal() { - cancellableTask = threadPool.scheduleWithFixedDelay(() -> { + Scheduler.Cancellable task = threadPool.scheduleWithFixedDelay(() -> { try { connectionManager.closeExpiredConnections(); if (maxIdleTime != null) { @@ -67,13 +68,17 @@ private void startInternal() { logger.warn("HTTP connection eviction failed", e); } }, sleepTime, threadPool.executor(UTILITY_THREAD_POOL_NAME)); + + cancellableTask.set(task); } public void stop() { - cancellableTask.cancel(); + if (cancellableTask.get() != null) { + cancellableTask.get().cancel(); + } } public boolean isRunning() { - return cancellableTask != null && cancellableTask.isCancelled() == false; + return cancellableTask.get() != null && cancellableTask.get().isCancelled() == false; } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java new file mode 100644 index 0000000000000..a9bdee95de5fc --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.http; + +import org.apache.http.HttpHeaders; +import org.apache.http.impl.nio.conn.PoolingNHttpClientConnectionManager; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentType; +import org.junit.After; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; +import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createThreadPool; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class HttpClientManagerTests extends ESTestCase { + private static final TimeValue TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); + + private final MockWebServer webServer = new MockWebServer(); + private ThreadPool threadPool; + + @Before + public void init() throws Exception { + webServer.start(); + threadPool = createThreadPool(getTestName()); + } + + @After + public void shutdown() { + terminate(threadPool); + webServer.close(); + } + + public void testSend_MockServerReceivesRequest() throws Exception { + int responseCode = randomIntBetween(200, 203); + String body = randomAlphaOfLengthBetween(2, 8096); + webServer.enqueue(new MockResponse().setResponseCode(responseCode).setBody(body)); + + String paramKey = randomAlphaOfLength(3); + String paramValue = randomAlphaOfLength(3); + var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); + + var manager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty()); + try (var httpClient = manager.getHttpClient()) { + httpClient.start(); + + PlainActionFuture listener = new PlainActionFuture<>(); + httpClient.send(httpPost, listener); + + var result = listener.actionGet(TIMEOUT); + + assertThat(result.response().getStatusLine().getStatusCode(), equalTo(responseCode)); + assertThat(new String(result.body(), StandardCharsets.UTF_8), is(body)); + assertThat(webServer.requests(), hasSize(1)); + assertThat(webServer.requests().get(0).getUri().getPath(), equalTo(httpPost.getURI().getPath())); + assertThat(webServer.requests().get(0).getUri().getQuery(), equalTo(paramKey + "=" + paramValue)); + assertThat(webServer.requests().get(0).getHeader(HttpHeaders.CONTENT_TYPE), equalTo(XContentType.JSON.mediaType())); + } + } + + public void testStartsANewEvictor_WithNewEvictionInterval() { + var threadPool = mock(ThreadPool.class); + var manager = HttpClientManager.create(Settings.EMPTY, threadPool, 
mockClusterServiceEmpty()); + + var evictionInterval = TimeValue.timeValueSeconds(1); + manager.setEvictionInterval(evictionInterval); + verify(threadPool).scheduleWithFixedDelay(any(Runnable.class), eq(evictionInterval), any()); + } + + public void testStartsANewEvictor_WithNewEvictionMaxIdle() throws InterruptedException { + var mockConnectionManager = mock(PoolingNHttpClientConnectionManager.class); + + Settings settings = Settings.builder() + .put(HttpClientManager.CONNECTION_EVICTION_THREAD_INTERVAL_SETTING.getKey(), TimeValue.timeValueNanos(1)) + .build(); + var manager = new HttpClientManager(settings, mockConnectionManager, threadPool, mockClusterService(settings)); + + CountDownLatch runLatch = new CountDownLatch(1); + doAnswer(invocation -> { + manager.close(); + runLatch.countDown(); + return Void.TYPE; + }).when(mockConnectionManager).closeIdleConnections(anyLong(), any()); + + var evictionMaxIdle = TimeValue.timeValueSeconds(1); + manager.setEvictionMaxIdle(evictionMaxIdle); + runLatch.await(TIMEOUT.getSeconds(), TimeUnit.SECONDS); + + verify(mockConnectionManager, times(1)).closeIdleConnections(eq(evictionMaxIdle.millis()), eq(TimeUnit.MILLISECONDS)); + } + + private static ClusterService mockClusterServiceEmpty() { + return mockClusterService(Settings.EMPTY); + } + + private static ClusterService mockClusterService(Settings settings) { + var clusterService = mock(ClusterService.class); + + var registeredSettings = Stream.concat(HttpClientManager.getSettings().stream(), HttpSettings.getSettings().stream()) + .collect(Collectors.toSet()); + + var cSettings = new ClusterSettings(settings, registeredSettings); + when(clusterService.getClusterSettings()).thenReturn(cSettings); + + return clusterService; + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java index 42c8422af3982..b0b0a34aabf97 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java @@ -45,6 +45,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.inference.InferencePlugin.HTTP_CLIENT_SENDER_THREAD_POOL_NAME; import static org.elasticsearch.xpack.inference.InferencePlugin.UTILITY_THREAD_POOL_NAME; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -64,17 +65,7 @@ public class HttpClientTests extends ESTestCase { @Before public void init() throws Exception { webServer.start(); - threadPool = new TestThreadPool( - getTestName(), - new ScalingExecutorBuilder( - UTILITY_THREAD_POOL_NAME, - 1, - 4, - TimeValue.timeValueMinutes(10), - false, - "xpack.inference.utility_thread_pool" - ) - ); + threadPool = createThreadPool(getTestName()); } @After @@ -92,7 +83,7 @@ public void testSend_MockServerReceivesRequest() throws Exception { String paramValue = randomAlphaOfLength(3); var httpPost = createHttpPost(webServer.getPort(), paramKey, paramValue); - try (var httpClient = HttpClient.create(emptyHttpSettings(), threadPool)) { + try (var httpClient = HttpClient.create(emptyHttpSettings(), threadPool, createConnectionManager())) { httpClient.start(); PlainActionFuture listener = new PlainActionFuture<>(); @@ -119,10 +110,9 @@ public void 
testSend_FailedCallsOnFailure() throws Exception { return mock(Future.class); }).when(asyncClient).execute(any(), any()); - var evictor = createEvictor(threadPool); var httpPost = createHttpPost(webServer.getPort(), "a", "b"); - try (var client = new HttpClient(emptyHttpSettings(), asyncClient, evictor, threadPool)) { + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, threadPool)) { client.start(); PlainActionFuture listener = new PlainActionFuture<>(); @@ -143,10 +133,9 @@ public void testSend_CancelledCallsOnFailure() throws Exception { return mock(Future.class); }).when(asyncClient).execute(any(), any()); - var evictor = createEvictor(threadPool); var httpPost = createHttpPost(webServer.getPort(), "a", "b"); - try (var client = new HttpClient(emptyHttpSettings(), asyncClient, evictor, threadPool)) { + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, threadPool)) { client.start(); PlainActionFuture listener = new PlainActionFuture<>(); @@ -162,10 +151,9 @@ public void testStart_MultipleCallsOnlyStartTheClientOnce() throws Exception { var asyncClient = mock(CloseableHttpAsyncClient.class); when(asyncClient.execute(any(), any())).thenReturn(mock(Future.class)); - var evictor = createEvictor(threadPool); var httpPost = createHttpPost(webServer.getPort(), "a", "b"); - try (var client = new HttpClient(emptyHttpSettings(), asyncClient, evictor, threadPool)) { + try (var client = new HttpClient(emptyHttpSettings(), asyncClient, threadPool)) { client.start(); PlainActionFuture listener = new PlainActionFuture<>(); @@ -188,7 +176,7 @@ public void testSend_FailsWhenMaxBytesReadIsExceeded() throws Exception { Settings settings = Settings.builder().put(HttpSettings.MAX_HTTP_RESPONSE_SIZE.getKey(), ByteSizeValue.ONE).build(); var httpSettings = createHttpSettings(settings); - try (var httpClient = HttpClient.create(httpSettings, threadPool)) { + try (var httpClient = HttpClient.create(httpSettings, threadPool, createConnectionManager())) { httpClient.start(); PlainActionFuture listener = new PlainActionFuture<>(); @@ -199,7 +187,7 @@ public void testSend_FailsWhenMaxBytesReadIsExceeded() throws Exception { } } - private static HttpPost createHttpPost(int port, String paramKey, String paramValue) throws URISyntaxException { + public static HttpPost createHttpPost(int port, String paramKey, String paramValue) throws URISyntaxException { URI uri = new URIBuilder().setScheme("http") .setHost("localhost") .setPort(port) @@ -219,16 +207,33 @@ private static HttpPost createHttpPost(int port, String paramKey, String paramVa return httpPost; } - private static IdleConnectionEvictor createEvictor(ThreadPool threadPool) throws IOReactorException { - var manager = createConnectionManager(); - return new IdleConnectionEvictor(threadPool, manager, new TimeValue(10, TimeUnit.SECONDS), new TimeValue(10, TimeUnit.SECONDS)); + public static ThreadPool createThreadPool(String name) { + return new TestThreadPool( + name, + new ScalingExecutorBuilder( + UTILITY_THREAD_POOL_NAME, + 1, + 4, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.utility_thread_pool" + ), + new ScalingExecutorBuilder( + HTTP_CLIENT_SENDER_THREAD_POOL_NAME, + 1, + 4, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.utility_thread_pool" + ) + ); } private static PoolingNHttpClientConnectionManager createConnectionManager() throws IOReactorException { return new PoolingNHttpClientConnectionManager(new DefaultConnectingIOReactor()); } - private static HttpSettings emptyHttpSettings() { + 
public static HttpSettings emptyHttpSettings() { return createHttpSettings(Settings.EMPTY); } From 77b4fd12bfc9dceb5e1febc570f1dd57eed43e6a Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 10 Oct 2023 17:16:04 +0200 Subject: [PATCH 123/176] Log aws request metrics (#100272) This change logs the number of requests, exceptions, and throttles seen by the AWS S3 API client, aggregated over a tumbling window. --- .../repositories/s3/S3BlobStore.java | 21 ++++- .../repositories/s3/S3RequestRetryStats.java | 87 +++++++++++++++++++ 2 files changed, 106 insertions(+), 2 deletions(-) create mode 100644 modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index f371d6f354763..3ff0497b42719 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -82,6 +82,10 @@ class S3BlobStore implements BlobStore { private final StatsCollectors statsCollectors = new StatsCollectors(); + private static final TimeValue RETRY_STATS_WINDOW = TimeValue.timeValueMinutes(5); + + private volatile S3RequestRetryStats s3RequestRetryStats; + S3BlobStore( S3Service service, String bucket, @@ -105,10 +109,23 @@ class S3BlobStore implements BlobStore { this.threadPool = threadPool; this.snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT); this.meter = meter; + s3RequestRetryStats = new S3RequestRetryStats(getMaxRetries()); + threadPool.scheduleWithFixedDelay(() -> { + var priorRetryStats = s3RequestRetryStats; + s3RequestRetryStats = new S3RequestRetryStats(getMaxRetries()); + priorRetryStats.emitMetrics(); + }, RETRY_STATS_WINDOW, threadPool.generic()); } RequestMetricCollector getMetricCollector(Operation operation, OperationPurpose purpose) { - return statsCollectors.getMetricCollector(operation, purpose); + var collector = statsCollectors.getMetricCollector(operation, purpose); + return new RequestMetricCollector() { + @Override + public void collectMetrics(Request request, Response response) { + s3RequestRetryStats.addRequest(request); + collector.collectMetrics(request, response); + } + }; } public Executor getSnapshotExecutor() { @@ -178,7 +195,7 @@ public AmazonS3Reference clientReference() { return service.client(repositoryMetadata); } - int getMaxRetries() { + final int getMaxRetries() { return service.settings(repositoryMetadata).maxRetries; } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java new file mode 100644 index 0000000000000..952668f370161 --- /dev/null +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RequestRetryStats.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.repositories.s3; + +import com.amazonaws.Request; +import com.amazonaws.util.AWSRequestMetrics; +import com.amazonaws.util.TimingInfo; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.ESLogMessage; +import org.elasticsearch.common.util.Maps; + +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicLongArray; + +/** + * This class emits AWS S3 metrics as logs until we have a proper APM integration + */ +public class S3RequestRetryStats { + + private static final Logger logger = LogManager.getLogger(S3RequestRetryStats.class); + + private final AtomicLong requests = new AtomicLong(); + private final AtomicLong exceptions = new AtomicLong(); + private final AtomicLong throttles = new AtomicLong(); + private final AtomicLongArray exceptionsHistogram; + private final AtomicLongArray throttlesHistogram; + + public S3RequestRetryStats(int maxRetries) { + this.exceptionsHistogram = new AtomicLongArray(maxRetries + 1); + this.throttlesHistogram = new AtomicLongArray(maxRetries + 1); + } + + public void addRequest(Request request) { + if (request == null) { + return; + } + var info = request.getAWSRequestMetrics().getTimingInfo(); + long requests = getCounter(info, AWSRequestMetrics.Field.RequestCount); + long exceptions = getCounter(info, AWSRequestMetrics.Field.Exception); + long throttles = getCounter(info, AWSRequestMetrics.Field.ThrottleException); + + this.requests.addAndGet(requests); + this.exceptions.addAndGet(exceptions); + this.throttles.addAndGet(throttles); + if (exceptions >= 0 && exceptions < this.exceptionsHistogram.length()) { + this.exceptionsHistogram.incrementAndGet((int) exceptions); + } + if (throttles >= 0 && throttles < this.throttlesHistogram.length()) { + this.throttlesHistogram.incrementAndGet((int) throttles); + } + } + + private static long getCounter(TimingInfo info, AWSRequestMetrics.Field field) { + var counter = info.getCounter(field.name()); + return counter != null ?
counter.longValue() : 0L; + } + + public void emitMetrics() { + if (logger.isDebugEnabled()) { + var metrics = Maps.newMapWithExpectedSize(3); + metrics.put("elasticsearch.metrics.s3.requests", requests.get()); + metrics.put("elasticsearch.metrics.s3.exceptions", exceptions.get()); + metrics.put("elasticsearch.metrics.s3.throttles", throttles.get()); + for (int i = 0; i < exceptionsHistogram.length(); i++) { + long exceptions = exceptionsHistogram.get(i); + if (exceptions != 0) { + metrics.put("elasticsearch.metrics.s3.exceptions.h" + i, exceptions); + } + } + for (int i = 0; i < throttlesHistogram.length(); i++) { + long throttles = throttlesHistogram.get(i); + if (throttles != 0) { + metrics.put("elasticsearch.metrics.s3.throttles.h" + i, throttles); + } + } + logger.debug(new ESLogMessage().withFields(metrics)); + } + } +} From d9234149982b8fd31fbf6d91840ae0097c4cc7ca Mon Sep 17 00:00:00 2001 From: Julia Bardi <90178898+juliaElastic@users.noreply.github.com> Date: Tue, 10 Oct 2023 17:18:48 +0200 Subject: [PATCH 124/176] added privileges to write metrics-fleet_server* (#100574) --- .../authz/store/KibanaOwnedReservedRoleDescriptors.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index f6046cd41f25c..579638f474b21 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -193,6 +193,8 @@ static RoleDescriptor kibanaSystem(String name) { .build(), // Fleet telemetry queries Agent Logs indices in kibana task runner RoleDescriptor.IndicesPrivileges.builder().indices("logs-elastic_agent*").privileges("read").build(), + // Fleet publishes Agent metrics in kibana task runner + RoleDescriptor.IndicesPrivileges.builder().indices("metrics-fleet_server*").privileges("auto_configure", "write").build(), // Legacy "Alerts as data" used in Security Solution. // Kibana user creates these indices; reads / writes to them. RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_LEGACY_INDEX).privileges("all").build(), From 94d7351b48032faafddc5b4fdf22554f961a88a3 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Tue, 10 Oct 2023 16:21:17 +0100 Subject: [PATCH 125/176] Re-enable org.elasticsearch.xpack.esql.action.EsqlActionIT.testFilterWithNullAndEvalFromIndex (#100604) This commit re-enables org.elasticsearch.xpack.esql.action.EsqlActionIT.testFilterWithNullAndEvalFromIndex, which now passes successfully. 
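A note on the retry-stats wiring in #100272 above: it is a swap-and-emit tumbling window. Request threads record into the current S3RequestRetryStats instance, and a scheduled task periodically swaps in a fresh instance before emitting the prior one, so each log line covers roughly one RETRY_STATS_WINDOW. Below is a minimal sketch of that pattern, not the patch's API: the names (TumblingWindowStats, Window, emit) are illustrative, and a plain ScheduledExecutorService stands in for the Elasticsearch ThreadPool.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch of the swap-and-emit pattern; names are hypothetical.
public class TumblingWindowStats {

    // One window's worth of counters; replaced wholesale on every tick.
    static final class Window {
        final AtomicLong requests = new AtomicLong();

        void emit() {
            System.out.println("requests in window: " + requests.get());
        }
    }

    // volatile so recording threads always observe the latest window.
    private volatile Window current = new Window();

    void start(ScheduledExecutorService scheduler) {
        // Swap first, then emit the prior window, mirroring the
        // scheduleWithFixedDelay block in the S3BlobStore constructor above.
        scheduler.scheduleWithFixedDelay(() -> {
            Window prior = current;
            current = new Window();
            prior.emit();
        }, 5, 5, TimeUnit.MINUTES);
    }

    void onRequest() {
        current.requests.incrementAndGet();
    }

    public static void main(String[] args) {
        TumblingWindowStats stats = new TumblingWindowStats();
        stats.start(Executors.newSingleThreadScheduledExecutor());
        stats.onRequest();
    }
}

A request racing with the swap can record into a window that has already been emitted and go unreported; the patch accepts the same benign race in exchange for lock-free recording.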
--- .../java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index fd4fe13b9c1b1..f10ca17d741d8 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -574,7 +574,6 @@ public void testStringLength() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") public void testFilterWithNullAndEvalFromIndex() { // append entry, with an absent count, to the index client().prepareBulk().add(new IndexRequest("test").id("no_count").source("data", 12, "data_d", 2d, "color", "red")).get(); From a3ba9f9888c97743c414e10acdfb4879880900cf Mon Sep 17 00:00:00 2001 From: Brandon Morelli Date: Tue, 10 Oct 2023 08:31:03 -0700 Subject: [PATCH 126/176] Update 8.10.3.asciidoc (#100590) (#100614) * Update 8.10.3.asciidoc * Update docs/reference/release-notes/8.10.3.asciidoc Co-authored-by: David Turner * Update docs/reference/release-notes/8.10.3.asciidoc --------- Co-authored-by: David Turner (cherry picked from commit 5ba96f2e2e766324a379fe1c93fff05c056211b3) --- docs/reference/release-notes/8.10.3.asciidoc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/reference/release-notes/8.10.3.asciidoc b/docs/reference/release-notes/8.10.3.asciidoc index a09beb26b4d27..b7828f52ad082 100644 --- a/docs/reference/release-notes/8.10.3.asciidoc +++ b/docs/reference/release-notes/8.10.3.asciidoc @@ -1,7 +1,11 @@ [[release-notes-8.10.3]] == {es} version 8.10.3 -coming[8.10.3] +[[known-issues-8.10.3]] +[float] +=== Known issues + +include::8.10.0.asciidoc[tag=repositorydata-format-change] Also see <>. From 4c96f9358b4ddea87637d34e25f345d821ec783d Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 10 Oct 2023 16:57:33 +0100 Subject: [PATCH 127/176] [ML] Unmute now fixed JobUpdateTest/testMergeWithJob (#100611) Closes #98626 --- .../elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index 543360fc24d89..09ff29f768dce 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -258,7 +258,6 @@ protected JobUpdate doParseInstance(XContentParser parser) { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/98626") public void testMergeWithJob() { List detectorUpdates = new ArrayList<>(); List detectionRules1 = Collections.singletonList( From 9ef5d301c299521bdec62226a639c7c1c296242a Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Tue, 10 Oct 2023 12:11:15 -0400 Subject: [PATCH 128/176] Revert "[CI] Mute MlHiddenIndicesFullClusterRestartIT.testMlIndicesBecomeHidden" (#100618) This reverts commit 725da76b70cad8e864bd150ac76f08d1f3312a8a. 
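These unmute changes (EsqlActionIT and JobUpdateTests above, and this MlHiddenIndicesFullClusterRestartIT revert) all rely on the same mechanism: @AwaitsFix marks a test as skipped while linking the tracking issue, so re-enabling the test is a one-line deletion. A minimal sketch of the convention; the test class and the issue URL here are hypothetical:

import org.elasticsearch.test.ESTestCase;

public class ExampleTests extends ESTestCase {

    // The test framework skips this test at runtime and reports the linked
    // issue; deleting the annotation, as the patches here do, re-enables it.
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/00000")
    public void testSomething() {
        assertTrue(true);
    }
}

@AwaitsFix resolves without an import because ESTestCase ultimately extends Lucene's LuceneTestCase, where the annotation is declared.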
--- .../xpack/restart/MlHiddenIndicesFullClusterRestartIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java index 79a2be51197e6..aeb3dad547946 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MlHiddenIndicesFullClusterRestartIT.java @@ -73,7 +73,6 @@ public void waitForMlTemplates() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/93521") public void testMlIndicesBecomeHidden() throws Exception { if (isRunningAgainstOldCluster()) { // trigger ML indices creation From 7ba224ea62838676d17e9b48e910119a7fc644d4 Mon Sep 17 00:00:00 2001 From: Jason Bryan Date: Tue, 10 Oct 2023 12:49:39 -0400 Subject: [PATCH 129/176] Bump versions after 7.17.14 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 16 ++++++++++++++++ .buildkite/pipelines/periodic.yml | 10 ++++++++++ .ci/bwcVersions | 1 + .ci/snapshotBwcVersions | 2 +- .../src/main/java/org/elasticsearch/Version.java | 1 + 6 files changed, 30 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 6f0657c3d5e8e..4cc59424db736 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -40,7 +40,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.14", "8.10.3", "8.11.0", "8.12.0"] + BWC_VERSION: ["7.17.15", "8.10.3", "8.11.0", "8.12.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 844570d945fdf..cf8b35cb941e4 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1056,6 +1056,22 @@ steps: env: BWC_VERSION: 7.17.14 + - label: "{{matrix.image}} / 7.17.15 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.15 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.15 + - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 8e959b07a9bc1..846cd3176593b 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -642,6 +642,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.14 + - label: 7.17.15 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.15#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.15 - label: 8.0.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest timeout_in_minutes: 300 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 
536a6cbf2a3b2..29942fe6032ad 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -63,6 +63,7 @@ BWC_VERSION: - "7.17.12" - "7.17.13" - "7.17.14" + - "7.17.15" - "8.0.0" - "8.0.1" - "8.1.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 7ad88baffac95..6b586a1d49f6c 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "7.17.14" + - "7.17.15" - "8.10.3" - "8.11.0" - "8.12.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 4d578e77e56bc..329e6eacb0bbe 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -114,6 +114,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_12 = new Version(7_17_12_99); public static final Version V_7_17_13 = new Version(7_17_13_99); public static final Version V_7_17_14 = new Version(7_17_14_99); + public static final Version V_7_17_15 = new Version(7_17_15_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); public static final Version V_8_1_0 = new Version(8_01_00_99); From 0ccbf44865eb98f1f1265b31c08ecd306460f264 Mon Sep 17 00:00:00 2001 From: Jason Bryan Date: Tue, 10 Oct 2023 12:53:11 -0400 Subject: [PATCH 130/176] Prune changelogs after 7.17.14 release --- docs/changelog/100106.yaml | 5 ----- docs/changelog/100134.yaml | 5 ----- docs/changelog/100179.yaml | 6 ------ docs/changelog/100207.yaml | 5 ----- docs/changelog/100284.yaml | 5 ----- docs/changelog/99231.yaml | 5 ----- docs/changelog/99604.yaml | 5 ----- docs/changelog/99660.yaml | 5 ----- docs/changelog/99673.yaml | 5 ----- docs/changelog/99677.yaml | 5 ----- docs/changelog/99724.yaml | 5 ----- docs/changelog/99738.yaml | 6 ------ docs/changelog/99803.yaml | 5 ----- docs/changelog/99814.yaml | 6 ------ docs/changelog/99818.yaml | 6 ------ docs/changelog/99846.yaml | 5 ----- docs/changelog/99868.yaml | 6 ------ docs/changelog/99892.yaml | 6 ------ docs/changelog/99914.yaml | 5 ----- docs/changelog/99946.yaml | 5 ----- 20 files changed, 106 deletions(-) delete mode 100644 docs/changelog/100106.yaml delete mode 100644 docs/changelog/100134.yaml delete mode 100644 docs/changelog/100179.yaml delete mode 100644 docs/changelog/100207.yaml delete mode 100644 docs/changelog/100284.yaml delete mode 100644 docs/changelog/99231.yaml delete mode 100644 docs/changelog/99604.yaml delete mode 100644 docs/changelog/99660.yaml delete mode 100644 docs/changelog/99673.yaml delete mode 100644 docs/changelog/99677.yaml delete mode 100644 docs/changelog/99724.yaml delete mode 100644 docs/changelog/99738.yaml delete mode 100644 docs/changelog/99803.yaml delete mode 100644 docs/changelog/99814.yaml delete mode 100644 docs/changelog/99818.yaml delete mode 100644 docs/changelog/99846.yaml delete mode 100644 docs/changelog/99868.yaml delete mode 100644 docs/changelog/99892.yaml delete mode 100644 docs/changelog/99914.yaml delete mode 100644 docs/changelog/99946.yaml diff --git a/docs/changelog/100106.yaml b/docs/changelog/100106.yaml deleted file mode 100644 index c3e3d50d2597a..0000000000000 --- a/docs/changelog/100106.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100106 -summary: Validate enrich index before completing policy execution -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/100134.yaml b/docs/changelog/100134.yaml deleted file mode 100644 index 
3836ec2793050..0000000000000 --- a/docs/changelog/100134.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100134 -summary: Implement matches() on `SourceConfirmedTextQuery` -area: Highlighting -type: enhancement -issues: [] diff --git a/docs/changelog/100179.yaml b/docs/changelog/100179.yaml deleted file mode 100644 index 2b7824c1575e6..0000000000000 --- a/docs/changelog/100179.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100179 -summary: ILM introduce the `check-ts-end-time-passed` step -area: ILM+SLM -type: bug -issues: - - 99696 diff --git a/docs/changelog/100207.yaml b/docs/changelog/100207.yaml deleted file mode 100644 index 10e55992f0e45..0000000000000 --- a/docs/changelog/100207.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100207 -summary: ILM the delete action waits for a TSDS index time/bounds to lapse -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/100284.yaml b/docs/changelog/100284.yaml deleted file mode 100644 index 956fc472d6656..0000000000000 --- a/docs/changelog/100284.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100284 -summary: Defend against negative datafeed start times -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/99231.yaml b/docs/changelog/99231.yaml deleted file mode 100644 index 9f5dfa1137587..0000000000000 --- a/docs/changelog/99231.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99231 -summary: Add manage permission for fleet managed threat intel indices -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/99604.yaml b/docs/changelog/99604.yaml deleted file mode 100644 index 0bace7aef1b26..0000000000000 --- a/docs/changelog/99604.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99604 -summary: Show a concrete error when the enrich index does not exist rather than a NullPointerException -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/99660.yaml b/docs/changelog/99660.yaml deleted file mode 100644 index ea19e24d51fff..0000000000000 --- a/docs/changelog/99660.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99660 -summary: Close expired search contexts on SEARCH thread -area: Search -type: bug -issues: [] diff --git a/docs/changelog/99673.yaml b/docs/changelog/99673.yaml deleted file mode 100644 index b48d620b21f49..0000000000000 --- a/docs/changelog/99673.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99673 -summary: Adding retry logic for start model deployment API -area: Machine Learning -type: bug -issues: [ ] diff --git a/docs/changelog/99677.yaml b/docs/changelog/99677.yaml deleted file mode 100644 index 04c1c28cf2e12..0000000000000 --- a/docs/changelog/99677.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99677 -summary: Using 1 MB chunks for elser model storage -area: Machine Learning -type: bug -issues: [ ] diff --git a/docs/changelog/99724.yaml b/docs/changelog/99724.yaml deleted file mode 100644 index 4fe78687bf72b..0000000000000 --- a/docs/changelog/99724.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99724 -summary: Upgrade bundled JDK to Java 21 -area: Packaging -type: upgrade -issues: [] diff --git a/docs/changelog/99738.yaml b/docs/changelog/99738.yaml deleted file mode 100644 index 1b65926aed741..0000000000000 --- a/docs/changelog/99738.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99738 -summary: Ignore "index not found" error when `delete_dest_index` flag is set but the - dest index doesn't exist -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/99803.yaml b/docs/changelog/99803.yaml deleted file mode 100644 index ce0929eb20e07..0000000000000 --- a/docs/changelog/99803.yaml +++ /dev/null @@ -1,5 +0,0 @@ 
-pr: 99803 -summary: Do not use PIT in the presence of remote indices in source -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/99814.yaml b/docs/changelog/99814.yaml deleted file mode 100644 index 1632be42b4e4c..0000000000000 --- a/docs/changelog/99814.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99814 -summary: Fix cardinality agg for `const_keyword` -area: Aggregations -type: bug -issues: - - 99776 diff --git a/docs/changelog/99818.yaml b/docs/changelog/99818.yaml deleted file mode 100644 index 8835bcf28e050..0000000000000 --- a/docs/changelog/99818.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99818 -summary: Add checks in term and terms queries that input terms are not too long -area: Search -type: enhancement -issues: - - 99802 diff --git a/docs/changelog/99846.yaml b/docs/changelog/99846.yaml deleted file mode 100644 index 198b0b6f939ac..0000000000000 --- a/docs/changelog/99846.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99846 -summary: Update version range in `jvm.options` for the Panama Vector API -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/99868.yaml b/docs/changelog/99868.yaml deleted file mode 100644 index 33d582f9ebd0a..0000000000000 --- a/docs/changelog/99868.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99868 -summary: Fix fields API for `geo_point` fields inside other arrays -area: Search -type: bug -issues: - - 99781 diff --git a/docs/changelog/99892.yaml b/docs/changelog/99892.yaml deleted file mode 100644 index 5090d1d888b65..0000000000000 --- a/docs/changelog/99892.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99892 -summary: Support $ and / in restore rename replacements -area: Snapshot/Restore -type: bug -issues: - - 99078 diff --git a/docs/changelog/99914.yaml b/docs/changelog/99914.yaml deleted file mode 100644 index 8b0026a8ff9ca..0000000000000 --- a/docs/changelog/99914.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99914 -summary: Let `_stats` internally timeout if checkpoint information can not be retrieved -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/99946.yaml b/docs/changelog/99946.yaml deleted file mode 100644 index 11dc4090baa0e..0000000000000 --- a/docs/changelog/99946.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99946 -summary: Skip settings validation during desired nodes updates -area: Distributed -type: bug -issues: [] From e723c7aacdb6d466bde71f62b267c99259072ec3 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Tue, 10 Oct 2023 19:33:30 +0200 Subject: [PATCH 131/176] Switch visibility to public in ESQL REST spec (#100622) This update the visibility field in ESQL's REST spec to public. It also updates the types of quotes used for one the REST object parameter to backticks, for consistency. --- .../src/main/resources/rest-api-spec/api/esql.query.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json index ffcd30fa6c717..c038ac4f3b749 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json @@ -5,7 +5,7 @@ "description":"Executes an ESQL request" }, "stability":"experimental", - "visibility":"private", + "visibility":"public", "headers":{ "accept": [ "application/json"], "content_type": ["application/json"] @@ -32,7 +32,7 @@ } }, "body":{ - "description":"Use the `query` element to start a query. 
Use `time_zone` to specify an execution time zone and 'columnar' to format the answer.", + "description":"Use the `query` element to start a query. Use `time_zone` to specify an execution time zone and `columnar` to format the answer.", "required":true } } From b280a63eb70a040db7006665c67404c8a1ffdfde Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Tue, 10 Oct 2023 14:12:36 -0400 Subject: [PATCH 132/176] [CI] Disable jenkins platform-support jobs, and re-enable all Buildkite periodic pipelines (#100630) --- .buildkite/scripts/periodic.trigger.sh | 34 +++++++------------ ...icsearch+multijob+platform-support-arm.yml | 15 ++++---- ...csearch+multijob+platform-support-unix.yml | 5 +-- ...arch+multijob+platform-support-windows.yml | 15 ++++---- ...arch+periodic+platform-support-trigger.yml | 6 ---- ...lasticsearch+periodic+platform-support.yml | 3 +- 6 files changed, 33 insertions(+), 45 deletions(-) delete mode 100644 .ci/jobs.t/elastic+elasticsearch+periodic+platform-support-trigger.yml diff --git a/.buildkite/scripts/periodic.trigger.sh b/.buildkite/scripts/periodic.trigger.sh index 754c701927185..3571d112c5b6d 100755 --- a/.buildkite/scripts/periodic.trigger.sh +++ b/.buildkite/scripts/periodic.trigger.sh @@ -12,6 +12,18 @@ for BRANCH in "${BRANCHES[@]}"; do LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') cat < Date: Tue, 10 Oct 2023 13:10:38 -0500 Subject: [PATCH 133/176] Bump versions after 8.10.3 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 16 ++++++++++++++++ .buildkite/pipelines/periodic.yml | 10 ++++++++++ .ci/bwcVersions | 1 + .ci/snapshotBwcVersions | 2 +- .../src/main/java/org/elasticsearch/Version.java | 1 + 6 files changed, 30 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 4cc59424db736..06b9f1dfbb6bf 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -40,7 +40,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.15", "8.10.3", "8.11.0", "8.12.0"] + BWC_VERSION: ["7.17.15", "8.10.4", "8.11.0", "8.12.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index cf8b35cb941e4..a265f7cd0cd4c 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1664,6 +1664,22 @@ steps: env: BWC_VERSION: 8.10.3 + - label: "{{matrix.image}} / 8.10.4 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.10.4 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.10.4 + - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 846cd3176593b..8143110607da2 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1022,6 +1022,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.3 + - label: 8.10.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.4#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + 
image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.10.4 - label: 8.11.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest timeout_in_minutes: 300 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 29942fe6032ad..0a17a776e44a2 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -101,5 +101,6 @@ BWC_VERSION: - "8.10.1" - "8.10.2" - "8.10.3" + - "8.10.4" - "8.11.0" - "8.12.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 6b586a1d49f6c..a27c1532720fa 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - "7.17.15" - - "8.10.3" + - "8.10.4" - "8.11.0" - "8.12.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 329e6eacb0bbe..69eaf17addb88 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -152,6 +152,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_10_1 = new Version(8_10_01_99); public static final Version V_8_10_2 = new Version(8_10_02_99); public static final Version V_8_10_3 = new Version(8_10_03_99); + public static final Version V_8_10_4 = new Version(8_10_04_99); public static final Version V_8_11_0 = new Version(8_11_00_99); public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version CURRENT = V_8_12_0; From 4b0a6cd59895817f45c5e67dd9cf363fadd6952f Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 10 Oct 2023 11:24:54 -0700 Subject: [PATCH 134/176] Capture JVM crash dump logs in uploaded artifact bundle (#100627) --- .../src/main/groovy/elasticsearch.build-complete.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle index 7cb66be729374..f80cfa566a518 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle @@ -28,6 +28,7 @@ if (buildNumber && performanceTest == null && GradleUtils.isIncludedBuild(projec include("**/build/test-results/**/*.xml") include("**/build/testclusters/**") include("**/build/testrun/*/temp/**") + include("**/build/**/hs_err_pid*.log") exclude("**/build/testclusters/**/data/**") exclude("**/build/testclusters/**/distro/**") exclude("**/build/testclusters/**/repo/**") From 3465a2bf183d157397aaea2772c59d2e28dbbe45 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 10 Oct 2023 20:28:02 +0200 Subject: [PATCH 135/176] Fix metric gauge creation model (#100609) OTEL gauges should follow the callback model otherwise they will not be sent by apm java agent. 
(or use BatchCallback.) This commit changes the gauge creation model to return Observable*Gauge and uses an AtomicReference to store the current value, which is polled when metrics are exported (and the callback is called). --- docs/changelog/100609.yaml | 5 + .../internal/metrics/DoubleGaugeAdapter.java | 25 +++- .../internal/metrics/LongGaugeAdapter.java | 20 ++- .../internal/metrics/GaugeAdapterTests.java | 123 ++++++++++++++++++ 4 files changed, 162 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/100609.yaml create mode 100644 modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java diff --git a/docs/changelog/100609.yaml b/docs/changelog/100609.yaml new file mode 100644 index 0000000000000..c1c63c1af5d4d --- /dev/null +++ b/docs/changelog/100609.yaml @@ -0,0 +1,5 @@ +pr: 100609 +summary: Fix metric gauge creation model +area: Infra/Core +type: bug +issues: [] diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java index 9d55d475d4a93..54f33be21698b 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java @@ -10,33 +10,46 @@ import io.opentelemetry.api.metrics.Meter; +import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; /** * DoubleGaugeAdapter wraps an otel ObservableDoubleMeasurement */ -public class DoubleGaugeAdapter extends AbstractInstrument +public class DoubleGaugeAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.DoubleGauge { + private final AtomicReference valueWithAttributes; + public DoubleGaugeAdapter(Meter meter, String name, String description, String unit) { super(meter, name, description, unit); + this.valueWithAttributes = new AtomicReference<>(new ValueWithAttributes(0.0, Collections.emptyMap())); } @Override - io.opentelemetry.api.metrics.ObservableDoubleMeasurement buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).gaugeBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).buildObserver(); + io.opentelemetry.api.metrics.ObservableDoubleGauge buildInstrument(Meter meter) { + return Objects.requireNonNull(meter) + .gaugeBuilder(getName()) + .setDescription(getDescription()) + .setUnit(getUnit()) + .buildWithCallback(measurement -> { + var localValueWithAttributed = valueWithAttributes.get(); + measurement.record(localValueWithAttributed.value(), OtelHelper.fromMap(localValueWithAttributed.attributes())); + }); } @Override public void record(double value) { - getInstrument().record(value); + record(value, Collections.emptyMap()); } @Override public void record(double value, Map attributes) { - getInstrument().record(value, OtelHelper.fromMap(attributes)); + this.valueWithAttributes.set(new ValueWithAttributes(value, attributes)); } + + private record ValueWithAttributes(double value, Map attributes) {} } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java index 48430285a5173..66d2287a765dc 100644 ---
a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java @@ -10,37 +10,47 @@ import io.opentelemetry.api.metrics.Meter; +import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; /** * LongGaugeAdapter wraps an otel ObservableLongMeasurement */ -public class LongGaugeAdapter extends AbstractInstrument +public class LongGaugeAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongGauge { + private final AtomicReference valueWithAttributes; public LongGaugeAdapter(Meter meter, String name, String description, String unit) { super(meter, name, description, unit); + this.valueWithAttributes = new AtomicReference<>(new ValueWithAttributes(0L, Collections.emptyMap())); } @Override - io.opentelemetry.api.metrics.ObservableLongMeasurement buildInstrument(Meter meter) { + io.opentelemetry.api.metrics.ObservableLongGauge buildInstrument(Meter meter) { + return Objects.requireNonNull(meter) .gaugeBuilder(getName()) .ofLongs() .setDescription(getDescription()) .setUnit(getUnit()) - .buildObserver(); + .buildWithCallback(measurement -> { + var localValueWithAttributed = valueWithAttributes.get(); + measurement.record(localValueWithAttributed.value(), OtelHelper.fromMap(localValueWithAttributed.attributes())); + }); } @Override public void record(long value) { - getInstrument().record(value); + record(value, Collections.emptyMap()); } @Override public void record(long value, Map attributes) { - getInstrument().record(value, OtelHelper.fromMap(attributes)); + this.valueWithAttributes.set(new ValueWithAttributes(value, attributes)); } + + private record ValueWithAttributes(long value, Map attributes) {} } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java new file mode 100644 index 0000000000000..1e230eefe32dc --- /dev/null +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.telemetry.apm.internal.metrics; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleGaugeBuilder; +import io.opentelemetry.api.metrics.LongGaugeBuilder; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.ObservableDoubleMeasurement; +import io.opentelemetry.api.metrics.ObservableLongMeasurement; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +import java.util.Map; +import java.util.function.Consumer; + +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class GaugeAdapterTests extends ESTestCase { + Meter testMeter = Mockito.mock(Meter.class); + LongGaugeBuilder longGaugeBuilder = Mockito.mock(LongGaugeBuilder.class); + DoubleGaugeBuilder mockDoubleGaugeBuilder = Mockito.mock(DoubleGaugeBuilder.class); + + @Before + public void init() { + when(longGaugeBuilder.setDescription(Mockito.anyString())).thenReturn(longGaugeBuilder); + when(longGaugeBuilder.setUnit(Mockito.anyString())).thenReturn(longGaugeBuilder); + + + when(mockDoubleGaugeBuilder.ofLongs()).thenReturn(longGaugeBuilder); + when(mockDoubleGaugeBuilder.setUnit(Mockito.anyString())).thenReturn(mockDoubleGaugeBuilder); + when(mockDoubleGaugeBuilder.setDescription(Mockito.anyString())).thenReturn(mockDoubleGaugeBuilder); + when(testMeter.gaugeBuilder(anyString())).thenReturn(mockDoubleGaugeBuilder); + } + + // testing that a value reported is then used in a callback + @SuppressWarnings("unchecked") + public void testLongGaugeRecord() { + LongGaugeAdapter longGaugeAdapter = new LongGaugeAdapter(testMeter, "name", "desc", "unit"); + + // recording a value + longGaugeAdapter.record(1L, Map.of("k", 1L)); + + // upon metric export, the consumer will be called + ArgumentCaptor> captor = ArgumentCaptor.forClass(Consumer.class); + verify(longGaugeBuilder).buildWithCallback(captor.capture()); + + Consumer value = captor.getValue(); + // making sure that a consumer will fetch the value passed down upon recording of a value + TestLongMeasurement testLongMeasurement = new TestLongMeasurement(); + value.accept(testLongMeasurement); + + assertThat(testLongMeasurement.value, Matchers.equalTo(1L)); + assertThat(testLongMeasurement.attributes, Matchers.equalTo(Attributes.builder().put("k", 1).build())); + } + + // testing that a value reported is then used in a callback + @SuppressWarnings("unchecked") + public void testDoubleGaugeRecord() { + DoubleGaugeAdapter doubleGaugeAdapter = new DoubleGaugeAdapter(testMeter, "name", "desc", "unit"); + + // recording a value + doubleGaugeAdapter.record(1.0, Map.of("k", 1.0)); + + // upon metric export, the consumer will be called + ArgumentCaptor> captor = ArgumentCaptor.forClass(Consumer.class); + verify(mockDoubleGaugeBuilder).buildWithCallback(captor.capture()); + + Consumer value = captor.getValue(); + // making sure that a consumer will fetch the value passed down upon recording of a value + TestDoubleMeasurement testLongMeasurement = new TestDoubleMeasurement(); + value.accept(testLongMeasurement); + + assertThat(testLongMeasurement.value, Matchers.equalTo(1.0)); + assertThat(testLongMeasurement.attributes, Matchers.equalTo(Attributes.builder().put("k", 1.0).build())); + } + + private static class TestDoubleMeasurement implements ObservableDoubleMeasurement { + double value; + 
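+ // attributes captured alongside the value; the tests assert both after invoking the gauge's callback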
Attributes attributes; + + @Override + public void record(double value) { + this.value = value; + } + + @Override + public void record(double value, Attributes attributes) { + this.value = value; + this.attributes = attributes; + + } + } + + private static class TestLongMeasurement implements ObservableLongMeasurement { + long value; + Attributes attributes; + + @Override + public void record(long value) { + this.value = value; + } + + @Override + public void record(long value, Attributes attributes) { + this.value = value; + this.attributes = attributes; + + } + } +} From 7836c18e58c994ac4543a4b5bdd17ebdf633437c Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Tue, 10 Oct 2023 20:41:42 +0100 Subject: [PATCH 136/176] fix BlockAccountingTests and add a new filter test scenario (#100600) This commit fixes a failure in BlockAccountingTests, and adds a new filter test scenario that filters on the last position of a multivalue. --- .../compute/data/BlockAccountingTests.java | 72 ++++++++++--------- .../compute/data/FilteredBlockTests.java | 69 ++++++++++++++++++ 2 files changed, 106 insertions(+), 35 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java index c8364141d8377..d62fd75abbcdd 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java @@ -45,7 +45,7 @@ public void testBooleanVector() { Vector emptyPlusOne = new BooleanArrayVector(new boolean[] { randomBoolean() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1))); - boolean[] randomData = new boolean[randomIntBetween(1, 1024)]; + boolean[] randomData = new boolean[randomIntBetween(2, 1024)]; Vector emptyPlusSome = new BooleanArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + randomData.length))); @@ -61,7 +61,7 @@ public void testIntVector() { Vector emptyPlusOne = new IntArrayVector(new int[] { randomInt() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Integer.BYTES))); - int[] randomData = new int[randomIntBetween(1, 1024)]; + int[] randomData = new int[randomIntBetween(2, 1024)]; Vector emptyPlusSome = new IntArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Integer.BYTES * randomData.length))); @@ -77,7 +77,7 @@ public void testLongVector() { Vector emptyPlusOne = new LongArrayVector(new long[] { randomLong() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Long.BYTES)); - long[] randomData = new long[randomIntBetween(1, 1024)]; + long[] randomData = new long[randomIntBetween(2, 1024)]; Vector emptyPlusSome = new LongArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length)); @@ -93,7 +93,7 @@ public void testDoubleVector() { Vector emptyPlusOne = new DoubleArrayVector(new double[] { randomDouble() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Double.BYTES)); - double[] randomData = new double[randomIntBetween(1, 1024)]; + double[] randomData = new double[randomIntBetween(2, 
1024)]; Vector emptyPlusSome = new DoubleArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length)); @@ -130,13 +130,11 @@ public void testBooleanBlock() { Block emptyPlusOne = new BooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1) + alignObjectSize(Integer.BYTES))); - boolean[] randomData = new boolean[randomIntBetween(1, 1024)]; - int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + boolean[] randomData = new boolean[randomIntBetween(2, 1024)]; + int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); Block emptyPlusSome = new BooleanArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - assertThat( - emptyPlusSome.ramBytesUsed(), - is(alignObjectSize(empty.ramBytesUsed() + randomData.length) + alignObjectSize(valueIndices.length * Integer.BYTES)) - ); + long expected = empty.ramBytesUsed() + ramBytesForBooleanArray(randomData) + ramBytesForIntArray(valueIndices); + assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); @@ -148,7 +146,6 @@ public void testBooleanBlockWithNullFirstValues() { assertThat(empty.ramBytesUsed(), lessThanOrEqualTo(expectedEmptyUsed)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100586") public void testIntBlock() { Block empty = new IntArrayBlock(new int[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); @@ -157,10 +154,11 @@ public void testIntBlock() { Block emptyPlusOne = new IntArrayBlock(new int[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + alignObjectSize(Integer.BYTES) + alignObjectSize(Integer.BYTES))); - int[] randomData = new int[randomIntBetween(1, 1024)]; - int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + int[] randomData = new int[randomIntBetween(2, 1024)]; + int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); Block emptyPlusSome = new IntArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + alignObjectSize((long) Integer.BYTES * randomData.length) * 2)); + long expected = empty.ramBytesUsed() + ramBytesForIntArray(randomData) + ramBytesForIntArray(valueIndices); + assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); @@ -180,17 +178,11 @@ public void testLongBlock() { Block emptyPlusOne = new LongArrayBlock(new long[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Long.BYTES) + alignObjectSize(Integer.BYTES))); - long[] randomData = new long[randomIntBetween(1, 1024)]; - int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + long[] randomData = new long[randomIntBetween(2, 1024)]; + int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); Block emptyPlusSome = new LongArrayBlock(randomData, 
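/* positionCount */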
randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - assertThat( - emptyPlusSome.ramBytesUsed(), - is( - alignObjectSize(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length) + alignObjectSize( - (long) valueIndices.length * Integer.BYTES - ) - ) - ); + long expected = empty.ramBytesUsed() + ramBytesForLongArray(randomData) + ramBytesForIntArray(valueIndices); + assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); @@ -210,17 +202,11 @@ public void testDoubleBlock() { Block emptyPlusOne = new DoubleArrayBlock(new double[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Double.BYTES) + alignObjectSize(Integer.BYTES))); - double[] randomData = new double[randomIntBetween(1, 1024)]; - int[] valueIndices = IntStream.range(0, randomData.length).toArray(); + double[] randomData = new double[randomIntBetween(2, 1024)]; + int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); Block emptyPlusSome = new DoubleArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); - assertThat( - emptyPlusSome.ramBytesUsed(), - is( - alignObjectSize(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length) + alignObjectSize( - valueIndices.length * Integer.BYTES - ) - ) - ); + long expected = empty.ramBytesUsed() + ramBytesForDoubleArray(randomData) + ramBytesForIntArray(valueIndices); + assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); @@ -260,5 +246,21 @@ public long accumulateObject(Object o, long shallowSize, Map fiel } return shallowSize; } - }; + } + + static long ramBytesForBooleanArray(boolean[] arr) { + return alignObjectSize((long) Byte.BYTES * arr.length); + } + + static long ramBytesForIntArray(int[] arr) { + return alignObjectSize((long) Integer.BYTES * arr.length); + } + + static long ramBytesForLongArray(long[] arr) { + return alignObjectSize((long) Long.BYTES * arr.length); + } + + static long ramBytesForDoubleArray(double[] arr) { + return alignObjectSize((long) Long.BYTES * arr.length); + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java index 28721be14f548..f43159b7ce9bd 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java @@ -316,6 +316,75 @@ public void testFilterToStringMultiValue() { } } + /** Tests filtering on the last position of a block with multi-values. 
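+ * The filtered result must retain both values at the surviving position, as the assertions below verify.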
*/ + public void testFilterOnLastPositionWithMultiValues() { + { + var builder = blockFactory.newBooleanBlockBuilder(0); + builder.beginPositionEntry().appendBoolean(true).appendBoolean(false).endPositionEntry(); + builder.beginPositionEntry().appendBoolean(false).appendBoolean(true).endPositionEntry(); + BooleanBlock block = builder.build(); + var filter = block.filter(1); + assertThat(filter.getPositionCount(), is(1)); + assertThat(filter.getValueCount(0), is(2)); + assertThat(filter.getBoolean(filter.getFirstValueIndex(0)), is(false)); + assertThat(filter.getBoolean(filter.getFirstValueIndex(0) + 1), is(true)); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + { + var builder = blockFactory.newIntBlockBuilder(6); + builder.beginPositionEntry().appendInt(0).appendInt(10).endPositionEntry(); + builder.beginPositionEntry().appendInt(20).appendInt(50).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(1); + assertThat(filter.getPositionCount(), is(1)); + assertThat(filter.getInt(filter.getFirstValueIndex(0)), is(20)); + assertThat(filter.getInt(filter.getFirstValueIndex(0) + 1), is(50)); + assertThat(filter.getValueCount(0), is(2)); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + { + var builder = blockFactory.newLongBlockBuilder(6); + builder.beginPositionEntry().appendLong(0).appendLong(10).endPositionEntry(); + builder.beginPositionEntry().appendLong(20).appendLong(50).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(1); + assertThat(filter.getPositionCount(), is(1)); + assertThat(filter.getValueCount(0), is(2)); + assertThat(filter.getLong(filter.getFirstValueIndex(0)), is(20L)); + assertThat(filter.getLong(filter.getFirstValueIndex(0) + 1), is(50L)); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + { + var builder = blockFactory.newDoubleBlockBuilder(6); + builder.beginPositionEntry().appendDouble(0).appendDouble(10).endPositionEntry(); + builder.beginPositionEntry().appendDouble(0.002).appendDouble(10e8).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(1); + assertThat(filter.getPositionCount(), is(1)); + assertThat(filter.getValueCount(0), is(2)); + assertThat(filter.getDouble(filter.getFirstValueIndex(0)), is(0.002)); + assertThat(filter.getDouble(filter.getFirstValueIndex(0) + 1), is(10e8)); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + { + var builder = blockFactory.newBytesRefBlockBuilder(6); + builder.beginPositionEntry().appendBytesRef(new BytesRef("cat")).appendBytesRef(new BytesRef("dog")).endPositionEntry(); + builder.beginPositionEntry().appendBytesRef(new BytesRef("pig")).appendBytesRef(new BytesRef("chicken")).endPositionEntry(); + var block = builder.build(); + var filter = block.filter(1); + assertThat(filter.getPositionCount(), is(1)); + assertThat(filter.getValueCount(0), is(2)); + assertThat(filter.getBytesRef(filter.getFirstValueIndex(0), new BytesRef()), equalTo(new BytesRef("pig"))); + assertThat(filter.getBytesRef(filter.getFirstValueIndex(0) + 1, new BytesRef()), equalTo(new BytesRef("chicken"))); + Releasables.close(builder, block); + releaseAndAssertBreaker(filter); + } + } + static int randomPosition(int positionCount) { return positionCount == 1 ? 
0 : randomIntBetween(0, positionCount - 1); } From 15828c9b6a56522aaefef506bf4f96cbe4958b46 Mon Sep 17 00:00:00 2001 From: Volodymyr Krasnikov <129072588+volodk85@users.noreply.github.com> Date: Tue, 10 Oct 2023 13:11:11 -0700 Subject: [PATCH 137/176] Add support for reindex over CCS (#96968) * Allow prefix index naming while reindexing from remote * Update docs/changelog/96968.yaml * Add ignore_unavailable index option to query parameters * Code style * Fix asciidoc * Exclude remote index names from reindex alias validation * spotless fix * Fix test * Fix test * code style fix * Do not eval remote expr locally + IT test * Fix test * in progress * Reverting back a bit (sync with main) * Ignore remote names in ReindexValidator * Add IT test, fix double re-indexing * codestyle * reduce scope of PR (do not handle ignore_unavailable request option) * minus api specs file * add datemath index name tests for within-cluster reindexing * Move out (to separate PR) logic which handles complex datemath expressions --- docs/changelog/96968.yaml | 6 + .../index/reindex/CrossClusterReindexIT.java | 162 ++++++++++++++++++ .../reindex/ReindexValidator.java | 18 +- 3 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/96968.yaml create mode 100644 modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java diff --git a/docs/changelog/96968.yaml b/docs/changelog/96968.yaml new file mode 100644 index 0000000000000..8cc6d4ac4c284 --- /dev/null +++ b/docs/changelog/96968.yaml @@ -0,0 +1,6 @@ +pr: 96968 +summary: Allow prefix index naming while reindexing from remote +area: Reindex +type: bug +issues: + - 89120 diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java new file mode 100644 index 0000000000000..b182d9e8c2bde --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java @@ -0,0 +1,162 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; + +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class CrossClusterReindexIT extends AbstractMultiClustersTestCase { + + private static final String REMOTE_CLUSTER = "remote-cluster"; + + @Override + protected boolean reuseClusters() { + return false; + } + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + return List.of(ReindexPlugin.class); + } + + private int indexDocs(Client client, String index) { + int numDocs = between(1, 100); + for (int i = 0; i < numDocs; i++) { + client.prepareIndex(index).setSource("f", "v").get(); + } + client.admin().indices().prepareRefresh(index).get(); + return numDocs; + } + + public void testReindexFromRemoteGivenIndexExists() throws Exception { + assertAcked(client(REMOTE_CLUSTER).admin().indices().prepareCreate("source-index-001")); + final int docsNumber = indexDocs(client(REMOTE_CLUSTER), "source-index-001"); + + final String sourceIndexInRemote = REMOTE_CLUSTER + ":" + "source-index-001"; + new ReindexRequestBuilder(client(LOCAL_CLUSTER), ReindexAction.INSTANCE).source(sourceIndexInRemote) + .destination("desc-index-001") + .get(); + + assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { + SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") + .setQuery(new MatchAllQueryBuilder()) + .setSize(1000) + .get(); + final TotalHits totalHits = resp.getHits().getTotalHits(); + return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; + })); + } + + public void testReindexFromRemoteGivenSameIndexNames() throws Exception { + assertAcked(client(REMOTE_CLUSTER).admin().indices().prepareCreate("test-index-001")); + final int docsNumber = indexDocs(client(REMOTE_CLUSTER), "test-index-001"); + + final String sourceIndexInRemote = REMOTE_CLUSTER + ":" + "test-index-001"; + new ReindexRequestBuilder(client(LOCAL_CLUSTER), ReindexAction.INSTANCE).source(sourceIndexInRemote) + .destination("test-index-001") + .get(); + + assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { + SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("test-index-001") + .setQuery(new MatchAllQueryBuilder()) + .setSize(1000) + .get(); + final TotalHits totalHits = resp.getHits().getTotalHits(); + return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; + })); + } + + public void testReindexManyTimesFromRemoteGivenSameIndexNames() throws Exception { + assertAcked(client(REMOTE_CLUSTER).admin().indices().prepareCreate("test-index-001")); + final long docsNumber = indexDocs(client(REMOTE_CLUSTER), "test-index-001"); + + final String sourceIndexInRemote = REMOTE_CLUSTER + ":" + "test-index-001"; + + int N = randomIntBetween(2, 10); + for (int 
attempt = 0; attempt < N; attempt++) { + + BulkByScrollResponse response = new ReindexRequestBuilder(client(LOCAL_CLUSTER), ReindexAction.INSTANCE).source( + sourceIndexInRemote + ).destination("test-index-001").get(); + + if (attempt == 0) { + assertThat(response.getCreated(), equalTo(docsNumber)); + assertThat(response.getUpdated(), equalTo(0L)); + } else { + assertThat(response.getCreated(), equalTo(0L)); + assertThat(response.getUpdated(), equalTo(docsNumber)); + } + + assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { + SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("test-index-001") + .setQuery(new MatchAllQueryBuilder()) + .setSize(1000) + .get(); + final TotalHits totalHits = resp.getHits().getTotalHits(); + return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; + })); + } + } + + public void testReindexFromRemoteThrowOnUnavailableIndex() throws Exception { + + final String sourceIndexInRemote = REMOTE_CLUSTER + ":" + "no-such-source-index-001"; + expectThrows( + IndexNotFoundException.class, + () -> new ReindexRequestBuilder(client(LOCAL_CLUSTER), ReindexAction.INSTANCE).source(sourceIndexInRemote) + .destination("desc-index-001") + .get() + ); + + // assert that local index was not created either + final IndexNotFoundException e = expectThrows( + IndexNotFoundException.class, + () -> client(LOCAL_CLUSTER).prepareSearch("desc-index-001").setQuery(new MatchAllQueryBuilder()).setSize(1000).get() + ); + assertThat(e.getMessage(), containsString("no such index [desc-index-001]")); + } + + public void testReindexFromRemoteGivenSimpleDateMathIndexName() throws InterruptedException { + assertAcked(client(REMOTE_CLUSTER).admin().indices().prepareCreate("datemath-2001-01-02")); + final int docsNumber = indexDocs(client(REMOTE_CLUSTER), "datemath-2001-01-02"); + + final String sourceIndexInRemote = REMOTE_CLUSTER + ":" + "<datemath-{2001-01-02||/d{yyyy-MM-dd}}>"; + new ReindexRequestBuilder(client(LOCAL_CLUSTER), ReindexAction.INSTANCE).source(sourceIndexInRemote) + .destination("desc-index-001") + .get(); + + assertTrue("Number of documents in source and desc indexes does not match", waitUntil(() -> { + SearchResponse resp = client(LOCAL_CLUSTER).prepareSearch("desc-index-001") + .setQuery(new MatchAllQueryBuilder()) + .setSize(1000) + .get(); + final TotalHits totalHits = resp.getHits().getTotalHits(); + return totalHits.relation == TotalHits.Relation.EQUAL_TO && totalHits.value == docsNumber; + })); + } + +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexValidator.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexValidator.java index aad38f64f64a5..a874dd1846e68 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexValidator.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexValidator.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.reindex.RemoteInfo; import org.elasticsearch.search.builder.SearchSourceBuilder; +import java.util.Arrays; import java.util.List; public class ReindexValidator { @@ -138,7 +139,12 @@ static void validateAgainstAliases( */ target = indexNameExpressionResolver.concreteWriteIndex(clusterState, destination).getName(); } - for (String sourceIndex : indexNameExpressionResolver.concreteIndexNames(clusterState, source)) { + SearchRequest filteredSource = skipRemoteIndexNames(source); + if (filteredSource.indices().length == 0) { + return; + } + String[] sourceIndexNames =
indexNameExpressionResolver.concreteIndexNames(clusterState, filteredSource); + for (String sourceIndex : sourceIndexNames) { if (sourceIndex.equals(target)) { ActionRequestValidationException e = new ActionRequestValidationException(); e.addValidationError("reindex cannot write into an index its reading from [" + target + ']'); @@ -146,4 +152,14 @@ static void validateAgainstAliases( } } } + + private static SearchRequest skipRemoteIndexNames(SearchRequest source) { + return new SearchRequest(source).indices( + Arrays.stream(source.indices()).filter(name -> isRemoteExpression(name) == false).toArray(String[]::new) + ); + } + + private static boolean isRemoteExpression(String expression) { + return expression.contains(":"); + } } From 63b4ee128cbdd44d9f151ff0fe58c220c9cd1dcc Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 10 Oct 2023 22:12:35 +0100 Subject: [PATCH 138/176] Increase timeout in MixedClusterClientYamlTestSuiteIT (#100585) This suite now has a couple of thousand tests, some of which take a couple of seconds, so it times out occasionally. Relaxing the timeout further. --- .../backwards/MixedClusterClientYamlTestSuiteIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/MixedClusterClientYamlTestSuiteIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/MixedClusterClientYamlTestSuiteIT.java index 1b53a64fb096d..f7caf4805be15 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/MixedClusterClientYamlTestSuiteIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/MixedClusterClientYamlTestSuiteIT.java @@ -15,7 +15,7 @@ import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; -@TimeoutSuite(millis = 40 * TimeUnits.MINUTE) // some of the windows test VMs are slow as hell +@TimeoutSuite(millis = 60 * TimeUnits.MINUTE) // some of the windows test VMs are slow as hell public class MixedClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public MixedClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { From c3b49c56c554690252e71d8376016393a5c6698b Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Wed, 11 Oct 2023 02:42:58 +0300 Subject: [PATCH 139/176] ESQL: Handle queries with non-existing enrich policies and no field (#100647) When dealing with non-existing policies, the validation code kept trying to determine the matching field resulting in a NPE. 
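For context, the pre-fix analyzer rule dereferenced the resolved policy unconditionally whenever no match field was given. A minimal before/after sketch, simplified from the Analyzer.java hunk in this patch (the two assignments are alternatives, not consecutive statements):

    // Before: `policy` is null when the enrich policy does not resolve, so reading
    // its default match field throws the NullPointerException described above.
    var matchField = plan.matchField() == null || plan.matchField() instanceof EmptyAttribute
        ? new UnresolvedAttribute(plan.source(), policy.getMatchField())
        : plan.matchField();

    // After: fall back to the policy's match field only when the policy resolved,
    // so analysis continues and the Verifier reports "unresolved enrich policy [foo]".
    var matchField = policy != null && (plan.matchField() == null || plan.matchField() instanceof EmptyAttribute)
        ? new UnresolvedAttribute(plan.source(), policy.getMatchField())
        : plan.matchField();
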
Fix #100593 --- docs/changelog/100647.yaml | 6 ++++++ .../org/elasticsearch/xpack/esql/analysis/Analyzer.java | 2 +- .../elasticsearch/xpack/esql/analysis/AnalyzerTests.java | 8 ++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/100647.yaml diff --git a/docs/changelog/100647.yaml b/docs/changelog/100647.yaml new file mode 100644 index 0000000000000..399407146af68 --- /dev/null +++ b/docs/changelog/100647.yaml @@ -0,0 +1,6 @@ +pr: 100647 +summary: "ESQL: Handle queries with non-existing enrich policies and no field" +area: ES|QL +type: bug +issues: + - 100593 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 8732321e8d068..818d58e91a91c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -219,7 +219,7 @@ protected LogicalPlan rule(Enrich plan, AnalyzerContext context) { ) : plan.policyName(); - var matchField = plan.matchField() == null || plan.matchField() instanceof EmptyAttribute + var matchField = policy != null && (plan.matchField() == null || plan.matchField() instanceof EmptyAttribute) ? new UnresolvedAttribute(plan.source(), policy.getMatchField()) : plan.matchField(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 1ee90256b95dd..6cbc1f93bcdf1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1256,6 +1256,14 @@ public void testNonExistingEnrichPolicy() { assertThat(e.getMessage(), containsString("unresolved enrich policy [foo]")); } + public void testNonExistingEnrichNoMatchField() { + var e = expectThrows(VerificationException.class, () -> analyze(""" + from test + | enrich foo + """)); + assertThat(e.getMessage(), containsString("unresolved enrich policy [foo]")); + } + public void testNonExistingEnrichPolicyWithSimilarName() { var e = expectThrows(VerificationException.class, () -> analyze(""" from test From 939de19eab594debacf7544512d1e9cd467da834 Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Wed, 11 Oct 2023 04:25:46 +0300 Subject: [PATCH 140/176] ESQL: Graceful handling of non-bool condition in the filter (#100645) Improve the Verifier to handle queries with non-boolean expressions used in the WHERE clause (where 10) In the process improve the readability of the Verifier class by extracting the checks into their own methods Fix #100049 Fix #100409 --- docs/changelog/100645.yaml | 7 + .../xpack/esql/analysis/Verifier.java | 187 ++++++++++-------- .../xpack/esql/analysis/VerifierTests.java | 8 + 3 files changed, 118 insertions(+), 84 deletions(-) create mode 100644 docs/changelog/100645.yaml diff --git a/docs/changelog/100645.yaml b/docs/changelog/100645.yaml new file mode 100644 index 0000000000000..e6bb6ab0fd653 --- /dev/null +++ b/docs/changelog/100645.yaml @@ -0,0 +1,7 @@ +pr: 100645 +summary: "ESQL: Graceful handling of non-bool condition in the filter" +area: ES|QL +type: bug +issues: + - 100049 + - 100409 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 59c6e2782b014..40f81d0247b33 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -55,6 +55,7 @@ import static org.elasticsearch.xpack.esql.stats.FeatureMetric.SORT; import static org.elasticsearch.xpack.esql.stats.FeatureMetric.STATS; import static org.elasticsearch.xpack.esql.stats.FeatureMetric.WHERE; +import static org.elasticsearch.xpack.ql.analyzer.VerifierChecks.checkFilterConditionType; import static org.elasticsearch.xpack.ql.common.Failure.fail; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; @@ -121,87 +122,128 @@ else if (p.resolved()) { // Concrete verifications plan.forEachDown(p -> { - if (p instanceof Aggregate agg) { - agg.aggregates().forEach(e -> { - var exp = e instanceof Alias ? ((Alias) e).child() : e; - if (exp instanceof AggregateFunction aggFunc) { - Expression field = aggFunc.field(); - - // TODO: allow an expression? - if ((field instanceof FieldAttribute - || field instanceof MetadataAttribute - || field instanceof ReferenceAttribute - || field instanceof Literal) == false) { - failures.add( - fail( - e, - "aggregate function's field must be an attribute or literal; found [" - + field.sourceText() - + "] of type [" - + field.nodeName() - + "]" - ) - ); - } - } else if (agg.groupings().contains(exp) == false) { // TODO: allow an expression? + // if the children are unresolved, so will this node; counting it will only add noise + if (p.childrenResolved() == false) { + return; + } + checkFilterConditionType(p, failures); + checkAggregate(p, failures); + checkRegexExtractOnlyOnStrings(p, failures); + + checkRow(p, failures); + checkEvalFields(p, failures); + + checkOperationsOnUnsignedLong(p, failures); + checkBinaryComparison(p, failures); + }); + + // gather metrics + if (failures.isEmpty()) { + gatherMetrics(plan); + } + + return failures; + } + + private static void checkAggregate(LogicalPlan p, Set failures) { + if (p instanceof Aggregate agg) { + agg.aggregates().forEach(e -> { + var exp = e instanceof Alias ? ((Alias) e).child() : e; + if (exp instanceof AggregateFunction aggFunc) { + Expression field = aggFunc.field(); + + // TODO: allow an expression? + if ((field instanceof FieldAttribute + || field instanceof MetadataAttribute + || field instanceof ReferenceAttribute + || field instanceof Literal) == false) { failures.add( fail( - exp, - "expected an aggregate function or group but got [" - + exp.sourceText() + e, + "aggregate function's field must be an attribute or literal; found [" + + field.sourceText() + "] of type [" - + exp.nodeName() + + field.nodeName() + "]" ) ); } - }); - } else if (p instanceof RegexExtract re) { - Expression expr = re.input(); - DataType type = expr.dataType(); - if (EsqlDataTypes.isString(type) == false) { + } else if (agg.groupings().contains(exp) == false) { // TODO: allow an expression? 
failures.add( fail( - expr, - "{} only supports KEYWORD or TEXT values, found expression [{}] type [{}]", - re.getClass().getSimpleName(), - expr.sourceText(), - type + exp, + "expected an aggregate function or group but got [" + exp.sourceText() + "] of type [" + exp.nodeName() + "]" ) ); } - } else if (p instanceof Row row) { - failures.addAll(validateRow(row)); - } else if (p instanceof Eval eval) { - failures.addAll(validateEval(eval)); + }); + } + } + + private static void checkRegexExtractOnlyOnStrings(LogicalPlan p, Set failures) { + if (p instanceof RegexExtract re) { + Expression expr = re.input(); + DataType type = expr.dataType(); + if (EsqlDataTypes.isString(type) == false) { + failures.add( + fail( + expr, + "{} only supports KEYWORD or TEXT values, found expression [{}] type [{}]", + re.getClass().getSimpleName(), + expr.sourceText(), + type + ) + ); } + } + } - p.forEachExpression(BinaryOperator.class, bo -> { - Failure f = validateUnsignedLongOperator(bo); - if (f != null) { - failures.add(f); + private static void checkRow(LogicalPlan p, Set failures) { + if (p instanceof Row row) { + row.fields().forEach(a -> { + if (EsqlDataTypes.isRepresentable(a.dataType()) == false) { + failures.add(fail(a, "cannot use [{}] directly in a row assignment", a.child().sourceText())); } }); - p.forEachExpression(BinaryComparison.class, bc -> { - Failure f = validateBinaryComparison(bc); - if (f != null) { - failures.add(f); - } - }); - p.forEachExpression(Neg.class, neg -> { - Failure f = validateUnsignedLongNegation(neg); - if (f != null) { - failures.add(f); + } + } + + private static void checkEvalFields(LogicalPlan p, Set failures) { + if (p instanceof Eval eval) { + eval.fields().forEach(field -> { + DataType dataType = field.dataType(); + if (EsqlDataTypes.isRepresentable(dataType) == false) { + failures.add( + fail(field, "EVAL does not support type [{}] in expression [{}]", dataType.typeName(), field.child().sourceText()) + ); } }); - }); - - // gather metrics - if (failures.isEmpty()) { - gatherMetrics(plan); } + } - return failures; + private static void checkOperationsOnUnsignedLong(LogicalPlan p, Set failures) { + p.forEachExpression(e -> { + Failure f = null; + + if (e instanceof BinaryOperator bo) { + f = validateUnsignedLongOperator(bo); + } else if (e instanceof Neg neg) { + f = validateUnsignedLongNegation(neg); + } + + if (f != null) { + failures.add(f); + } + }); + } + + private static void checkBinaryComparison(LogicalPlan p, Set failures) { + p.forEachExpression(BinaryComparison.class, bc -> { + Failure f = validateBinaryComparison(bc); + if (f != null) { + failures.add(f); + } + }); } private void gatherMetrics(LogicalPlan plan) { @@ -228,29 +270,6 @@ private void gatherMetrics(LogicalPlan plan) { } } - private static Collection validateRow(Row row) { - List failures = new ArrayList<>(row.fields().size()); - row.fields().forEach(a -> { - if (EsqlDataTypes.isRepresentable(a.dataType()) == false) { - failures.add(fail(a, "cannot use [{}] directly in a row assignment", a.child().sourceText())); - } - }); - return failures; - } - - private static Collection validateEval(Eval eval) { - List failures = new ArrayList<>(eval.fields().size()); - eval.fields().forEach(field -> { - DataType dataType = field.dataType(); - if (EsqlDataTypes.isRepresentable(dataType) == false) { - failures.add( - fail(field, "EVAL does not support type [{}] in expression [{}]", dataType.typeName(), field.child().sourceText()) - ); - } - }); - return failures; - } - /** * Limit QL's comparisons to 
types we support. */ diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 10f134432a0a2..cd1c9d8fbe830 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -292,6 +292,14 @@ public void testPeriodAndDurationInEval() { } } + public void testFilterNonBoolField() { + assertEquals("1:19: Condition expression needs to be boolean, found [INTEGER]", error("from test | where emp_no")); + } + + public void testFilterDateConstant() { + assertEquals("1:19: Condition expression needs to be boolean, found [DATE_PERIOD]", error("from test | where 1 year")); + } + private String error(String query) { return error(query, defaultAnalyzer); From c715c03da83914225b9883c5cc12d56487e97c4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Wed, 11 Oct 2023 06:57:07 +0200 Subject: [PATCH 141/176] [Transform] Make Transform Feature Reset really wait for all the tasks (#100624) --- docs/changelog/100624.yaml | 5 +++++ .../xpack/transform/integration/TestFeatureResetIT.java | 3 ++- .../java/org/elasticsearch/xpack/transform/Transform.java | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/100624.yaml diff --git a/docs/changelog/100624.yaml b/docs/changelog/100624.yaml new file mode 100644 index 0000000000000..247343bf03ed8 --- /dev/null +++ b/docs/changelog/100624.yaml @@ -0,0 +1,5 @@ +pr: 100624 +summary: Make Transform Feature Reset really wait for all the tasks +area: Transform +type: bug +issues: [] diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TestFeatureResetIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TestFeatureResetIT.java index 32cdcee280d6e..6ba0f572a2f9f 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TestFeatureResetIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TestFeatureResetIT.java @@ -114,7 +114,8 @@ public void testTransformFeatureReset() throws Exception { ); // assert transform indices are gone - assertThat(ESRestTestCase.entityAsMap(adminClient().performRequest(new Request("GET", ".transform-*"))), is(anEmptyMap())); + Map transformIndices = ESRestTestCase.entityAsMap(adminClient().performRequest(new Request("GET", ".transform-*"))); + assertThat("Indices were: " + transformIndices, transformIndices, is(anEmptyMap())); } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index c1964448c2662..81a719e24f633 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -426,7 +426,7 @@ public void cleanUpFeature( client.admin() .cluster() .prepareListTasks() - .setActions(TransformField.TASK_NAME) + .setActions(TransformField.TASK_NAME + "*") .setWaitForCompletion(true) .execute(ActionListener.wrap(listTransformTasks -> { listTransformTasks.rethrowFailures("Waiting for 
transform tasks"); From 2a7a74c8b93f5d8274c9ae24c6dea62e452cd662 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Wed, 11 Oct 2023 16:00:42 +1100 Subject: [PATCH 142/176] [Monitoring] Dont get cluster state until recovery (#100565) The LocalExporter would call `clusterService.state()` as part of a scheduled runnable, however this could end up running before the cluster state was recovered, and calling state() before recovery is not permitted (this trips an assertion in tests) The class already listened to cluster events and detected when cluster state recovery was complete, this commit causes the scheduled cleanup method to do nothing if the recovery event has not yet been received. --- docs/changelog/100565.yaml | 5 +++ .../exporter/local/LocalExporter.java | 5 +++ .../exporter/local/LocalExporterTests.java | 33 +++++++++++++++++++ 3 files changed, 43 insertions(+) create mode 100644 docs/changelog/100565.yaml diff --git a/docs/changelog/100565.yaml b/docs/changelog/100565.yaml new file mode 100644 index 0000000000000..066e9bbb4b227 --- /dev/null +++ b/docs/changelog/100565.yaml @@ -0,0 +1,5 @@ +pr: 100565 +summary: "[Monitoring] Dont get cluster state until recovery" +area: Monitoring +type: bug +issues: [] diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java index 467378f4cd738..ba43cf82d1458 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java @@ -596,6 +596,11 @@ private boolean canUseWatcher() { @Override public void onCleanUpIndices(TimeValue retention) { + if (stateInitialized.get() == false) { + // ^ this is once the cluster state is recovered. 
Don't try to interact with the cluster service until that happens + logger.debug("exporter not yet initialized"); + return; + } ClusterState clusterState = clusterService.state(); if (clusterService.localNode() == null || clusterState == null diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java index 3b0d301099d72..a30975be1055d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java @@ -7,17 +7,25 @@ package org.elasticsearch.xpack.monitoring.exporter.local; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.xpack.monitoring.cleaner.CleanerService; import org.elasticsearch.xpack.monitoring.exporter.Exporter; import org.elasticsearch.xpack.monitoring.exporter.MonitoringMigrationCoordinator; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; public class LocalExporterTests extends ESTestCase { @@ -37,4 +45,29 @@ public void testLocalExporterRemovesListenersOnClose() { verify(licenseState).removeListener(exporter); } + public void testLocalExporterDoesNotInteractWithClusterServiceUntilStateIsRecovered() { + final ClusterService clusterService = mock(ClusterService.class); + final XPackLicenseState licenseState = mock(XPackLicenseState.class); + final Exporter.Config config = new Exporter.Config("name", "type", Settings.EMPTY, clusterService, licenseState); + final CleanerService cleanerService = mock(CleanerService.class); + final MonitoringMigrationCoordinator migrationCoordinator = new MonitoringMigrationCoordinator(); + try (Client client = new NoOpClient(getTestName())) { + final LocalExporter exporter = new LocalExporter(config, client, migrationCoordinator, cleanerService); + + final TimeValue retention = TimeValue.timeValueDays(randomIntBetween(1, 90)); + exporter.onCleanUpIndices(retention); + + verify(clusterService).addListener(same(exporter)); + verifyNoMoreInteractions(clusterService); + + final ClusterState oldState = ClusterState.EMPTY_STATE; + final ClusterState newState = ClusterStateCreationUtils.stateWithNoShard(); + exporter.clusterChanged(new ClusterChangedEvent(getTestName(), newState, oldState)); + verify(clusterService).localNode(); + + exporter.onCleanUpIndices(retention); + verify(clusterService).state(); + verify(clusterService, times(2)).localNode(); + } + } } From f35c3b49b535d7510c9ad2adae35d6cd4dd1f785 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 11 Oct 2023 06:03:34 +0100 Subject: [PATCH 143/176] AwaitsFix for #100653 --- .../xpack/downsample/DownsampleClusterDisruptionIT.java | 1 + 1 file changed, 1 
insertion(+) diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java index 84b55a5fa8009..cf234e31f1f7c 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java @@ -209,6 +209,7 @@ public boolean validateClusterForming() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100653") public void testDownsampleIndexWithRollingRestart() throws Exception { try (InternalTestCluster cluster = internalCluster()) { final List masterNodes = cluster.startMasterOnlyNodes(1); From b5843e4b98d2856310b871ef03e2db09927e4b6a Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 11 Oct 2023 07:03:31 +0100 Subject: [PATCH 144/176] Encapsulate snapshots deletion process (#100617) Introduces the `SnapshotsDeletion` class which encapsulates the process of deleting some collection of snapshots. In particular this class gives us somewhere to store various deletion-wide data which significantly reduces the length of some argument lists. Relates #100568 --- .../test/repository_url/10_basic.yml | 4 +- .../SharedClusterSnapshotRestoreIT.java | 2 +- .../blobstore/BlobStoreRepository.java | 796 +++++++++--------- 3 files changed, 418 insertions(+), 384 deletions(-) diff --git a/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml b/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml index 4508dacbfe7e9..01152a5930f47 100644 --- a/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml +++ b/modules/repository-url/src/yamlRestTest/resources/rest-api-spec/test/repository_url/10_basic.yml @@ -167,7 +167,7 @@ teardown: - match: {count: 3} - do: - catch: /cannot delete snapshot from a readonly repository/ + catch: /repository is readonly/ snapshot.delete: repository: repository-url snapshot: snapshot-two @@ -229,7 +229,7 @@ teardown: - match: {count: 3} - do: - catch: /cannot delete snapshot from a readonly repository/ + catch: /repository is readonly/ snapshot.delete: repository: repository-file snapshot: snapshot-one diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 7fa59f0b47b61..71d036cc6b0f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1050,7 +1050,7 @@ public void testReadonlyRepository() throws Exception { assertRequestBuilderThrows( client.admin().cluster().prepareDeleteSnapshot("readonly-repo", "test-snap"), RepositoryException.class, - "cannot delete snapshot from a readonly repository" + "repository is readonly" ); logger.info("--> try making another snapshot"); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 9a2d53312d577..98d725b9d1367 100644 --- 
a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -835,432 +835,466 @@ public void deleteSnapshots( long repositoryDataGeneration, IndexVersion repositoryFormatIndexVersion, SnapshotDeleteListener listener + ) { + createSnapshotsDeletion(snapshotIds, repositoryDataGeneration, repositoryFormatIndexVersion, new ActionListener<>() { + @Override + public void onResponse(SnapshotsDeletion snapshotsDeletion) { + snapshotsDeletion.runDelete(listener); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + private void createSnapshotsDeletion( + Collection snapshotIds, + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, + ActionListener listener ) { if (isReadOnly()) { - listener.onFailure(new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository")); + listener.onFailure(new RepositoryException(metadata.name(), "repository is readonly")); } else { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - final Map rootBlobs = blobContainer().listBlobs(OperationPurpose.SNAPSHOT); - final RepositoryData repositoryData = safeRepositoryData(repositoryDataGeneration, rootBlobs); - // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never - // delete an index that was created by another master node after writing this index-N blob. - final Map foundIndices = blobStore().blobContainer(indicesPath()) - .children(OperationPurpose.SNAPSHOT); - doDeleteShardSnapshots( - snapshotIds, - repositoryDataGeneration, - foundIndices, - rootBlobs, - repositoryData, - repositoryFormatIndexVersion, - listener - ); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(new RepositoryException(metadata.name(), "failed to delete snapshots " + snapshotIds, e)); - } - }); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.supply(listener, () -> { + final var originalRootBlobs = blobContainer().listBlobs(OperationPurpose.SNAPSHOT); + return new SnapshotsDeletion( + snapshotIds, + repositoryDataGeneration, + repositoryFormatIndexVersion, + originalRootBlobs, + blobStore().blobContainer(indicesPath()).children(OperationPurpose.SNAPSHOT), + safeRepositoryData(repositoryDataGeneration, originalRootBlobs) + ); + })); } } /** - * The result of removing a snapshot from a shard folder in the repository. - * - * @param indexId Index that the snapshot was removed from - * @param shardId Shard id that the snapshot was removed from - * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more - * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation + *

+     * <p>
+     * Represents the process of deleting some collection of snapshots within this repository which since 7.6.0 looks like this:
+     * </p>
+     * <ul>
+     * <li>Write a new {@link BlobStoreIndexShardSnapshots} for each affected shard, and compute the blobs to delete.</li>
+     * <li>Update the {@link RepositoryData} to remove references to deleted snapshots/indices and point to the new
+     * {@link BlobStoreIndexShardSnapshots} files.</li>
+     * <li>Remove any now-unreferenced blobs.</li>
+     * </ul>
+     * <p>
+     * Until the {@link RepositoryData} is updated there should be no other activities in the repository, and in particular the root
+     * blob must not change until it is updated by this deletion and {@link SnapshotDeleteListener#onRepositoryDataWritten} is called.
+     * </p>
*/ - private record ShardSnapshotMetaDeleteResult( - IndexId indexId, - int shardId, - ShardGeneration newGeneration, - Collection blobsToDelete - ) {} - - // --------------------------------------------------------------------------------------------------------------------------------- - // The overall flow of execution + class SnapshotsDeletion { + + /** + * The IDs of the snapshots to delete. + */ + private final Collection snapshotIds; + + /** + * The {@link RepositoryData} generation at the start of the process, to ensure that the {@link RepositoryData} does not change + * while the new {@link BlobStoreIndexShardSnapshots} are being written. + */ + private final long originalRepositoryDataGeneration; + + /** + * The minimum {@link IndexVersion} of the nodes in the cluster and the snapshots remaining in the repository. The repository must + * remain readable by all node versions which support this {@link IndexVersion}. + */ + private final IndexVersion repositoryFormatIndexVersion; + + /** + * Whether the {@link #repositoryFormatIndexVersion} is new enough to support naming {@link BlobStoreIndexShardSnapshots} blobs with + * UUIDs (i.e. does not need to remain compatible with versions before v7.6.0). Older repositories use (unsafe) numeric indices for + * these blobs instead. + */ + private final boolean useShardGenerations; + + /** + * All blobs in the repository root at the start of the operation, obtained by listing the repository contents. Note that this may + * include some blobs which are no longer referenced by the current {@link RepositoryData}, but which have not yet been removed by + * the cleanup that follows an earlier deletion. This cleanup may still be ongoing (we do not wait for it to complete before + * starting the next repository operation) or it may have failed before completion (it could have been running on a different node, + * which crashed for unrelated reasons) so we track all the blobs here and clean them up again at the end. + */ + private final Map originalRootBlobs; + + /** + * All index containers at the start of the operation, obtained by listing the repository contents. Note that this may include some + * containers which are no longer referenced by the current {@link RepositoryData}, but which have not yet been removed by + * the cleanup that follows an earlier deletion. This cleanup may or may not still be ongoing (it could have been running on a + * different node, which died before completing it) so we track all the blobs here and clean them up again at the end. + */ + private final Map originalIndexContainers; + + /** + * The {@link RepositoryData} at the start of the operation, obtained after verifying that {@link #originalRootBlobs} contains no + * {@link RepositoryData} blob newer than the one identified by {@link #originalRepositoryDataGeneration}. + */ + private final RepositoryData originalRepositoryData; + + /** + * Executor to use for all repository interactions. 
+ */ + private final Executor snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + + SnapshotsDeletion( + Collection snapshotIds, + long originalRepositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, + Map originalRootBlobs, + Map originalIndexContainers, + RepositoryData originalRepositoryData + ) { + this.snapshotIds = snapshotIds; + this.originalRepositoryDataGeneration = originalRepositoryDataGeneration; + this.repositoryFormatIndexVersion = repositoryFormatIndexVersion; + this.useShardGenerations = SnapshotsService.useShardGenerations(repositoryFormatIndexVersion); + this.originalRootBlobs = originalRootBlobs; + this.originalIndexContainers = originalIndexContainers; + this.originalRepositoryData = originalRepositoryData; + } + + /** + * The result of removing a snapshot from a shard folder in the repository. + * + * @param indexId Index that the snapshot was removed from + * @param shardId Shard id that the snapshot was removed from + * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more + * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation + */ + private record ShardSnapshotMetaDeleteResult( + IndexId indexId, + int shardId, + ShardGeneration newGeneration, + Collection blobsToDelete + ) {} - /** - * After updating the {@link RepositoryData} each of the shards directories is individually first moved to the next shard generation - * and then has all now unreferenced blobs in it deleted. - * - * @param snapshotIds SnapshotIds to delete - * @param originalRepositoryDataGeneration {@link RepositoryData} generation at the start of the process. - * @param originalIndexContainers All index containers at the start of the operation, obtained by listing the repository - * contents. - * @param originalRootBlobs All blobs found at the root of the repository at the start of the operation, obtained by - * listing the repository contents. - * @param originalRepositoryData {@link RepositoryData} at the start of the operation. - * @param repositoryFormatIndexVersion The minimum {@link IndexVersion} of the nodes in the cluster and the snapshots remaining in - * the repository. - * @param listener Listener to invoke once finished - */ - private void doDeleteShardSnapshots( - Collection snapshotIds, - long originalRepositoryDataGeneration, - Map originalIndexContainers, - Map originalRootBlobs, - RepositoryData originalRepositoryData, - IndexVersion repositoryFormatIndexVersion, - SnapshotDeleteListener listener - ) { - if (SnapshotsService.useShardGenerations(repositoryFormatIndexVersion)) { - // First write the new shard state metadata (with the removed snapshot) and compute deletion targets - final ListenableFuture> writeShardMetaDataAndComputeDeletesStep = - new ListenableFuture<>(); - writeUpdatedShardMetaDataAndComputeDeletes(snapshotIds, originalRepositoryData, true, writeShardMetaDataAndComputeDeletesStep); - // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows: - // 1. Remove the snapshots from the list of existing snapshots - // 2. Update the index shard generations of all updated shard folders - // - // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created - // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only - // written if all shard paths have been successfully updated. 
- final ListenableFuture writeUpdatedRepoDataStep = new ListenableFuture<>(); - writeShardMetaDataAndComputeDeletesStep.addListener(ActionListener.wrap(shardDeleteResults -> { - final ShardGenerations.Builder builder = ShardGenerations.builder(); - for (ShardSnapshotMetaDeleteResult newGen : shardDeleteResults) { - builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); - } - final RepositoryData newRepositoryData = originalRepositoryData.removeSnapshots(snapshotIds, builder.build()); + // --------------------------------------------------------------------------------------------------------------------------------- + // The overall flow of execution + + private void runDelete(SnapshotDeleteListener listener) { + if (useShardGenerations) { + // First write the new shard state metadata (with the removed snapshot) and compute deletion targets + final ListenableFuture> writeShardMetaDataAndComputeDeletesStep = + new ListenableFuture<>(); + writeUpdatedShardMetaDataAndComputeDeletes(writeShardMetaDataAndComputeDeletesStep); + // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows: + // 1. Remove the snapshots from the list of existing snapshots + // 2. Update the index shard generations of all updated shard folders + // + // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created + // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only + // written if all shard paths have been successfully updated. + final ListenableFuture writeUpdatedRepoDataStep = new ListenableFuture<>(); + writeShardMetaDataAndComputeDeletesStep.addListener(ActionListener.wrap(shardDeleteResults -> { + final ShardGenerations.Builder builder = ShardGenerations.builder(); + for (ShardSnapshotMetaDeleteResult newGen : shardDeleteResults) { + builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); + } + final RepositoryData newRepositoryData = originalRepositoryData.removeSnapshots(snapshotIds, builder.build()); + writeIndexGen( + newRepositoryData, + originalRepositoryDataGeneration, + repositoryFormatIndexVersion, + Function.identity(), + ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) + ); + }, listener::onFailure)); + // Once we have updated the repository, run the clean-ups + writeUpdatedRepoDataStep.addListener(ActionListener.wrap(newRepositoryData -> { + listener.onRepositoryDataWritten(newRepositoryData); + // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion + try (var refs = new RefCountingRunnable(listener::onDone)) { + cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); + cleanupUnlinkedShardLevelBlobs(writeShardMetaDataAndComputeDeletesStep.result(), refs.acquireListener()); + } + }, listener::onFailure)); + } else { + // Write the new repository data first (with the removed snapshot), using no shard generations writeIndexGen( - newRepositoryData, + originalRepositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY), originalRepositoryDataGeneration, repositoryFormatIndexVersion, Function.identity(), - ActionListener.wrap(writeUpdatedRepoDataStep::onResponse, listener::onFailure) - ); - }, listener::onFailure)); - // Once we have updated the repository, run the clean-ups - writeUpdatedRepoDataStep.addListener(ActionListener.wrap(newRepositoryData -> { - listener.onRepositoryDataWritten(newRepositoryData); - // Run unreferenced blobs cleanup in 
parallel to shard-level snapshot deletion - try (var refs = new RefCountingRunnable(listener::onDone)) { - cleanupUnlinkedRootAndIndicesBlobs( - snapshotIds, - originalIndexContainers, - originalRootBlobs, - newRepositoryData, - refs.acquireListener() - ); - cleanupUnlinkedShardLevelBlobs( - originalRepositoryData, - snapshotIds, - writeShardMetaDataAndComputeDeletesStep.result(), - refs.acquireListener() - ); - } - }, listener::onFailure)); - } else { - // Write the new repository data first (with the removed snapshot), using no shard generations - writeIndexGen( - originalRepositoryData.removeSnapshots(snapshotIds, ShardGenerations.EMPTY), - originalRepositoryDataGeneration, - repositoryFormatIndexVersion, - Function.identity(), - ActionListener.wrap(newRepositoryData -> { - try (var refs = new RefCountingRunnable(() -> { - listener.onRepositoryDataWritten(newRepositoryData); - listener.onDone(); - })) { - // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - cleanupUnlinkedRootAndIndicesBlobs( - snapshotIds, - originalIndexContainers, - originalRootBlobs, - newRepositoryData, - refs.acquireListener() - ); - - // writeIndexGen finishes on master-service thread so must fork here. - threadPool.executor(ThreadPool.Names.SNAPSHOT) - .execute( + ActionListener.wrap(newRepositoryData -> { + try (var refs = new RefCountingRunnable(() -> { + listener.onRepositoryDataWritten(newRepositoryData); + listener.onDone(); + })) { + // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion + cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); + + // writeIndexGen finishes on master-service thread so must fork here. + snapshotExecutor.execute( ActionRunnable.wrap( refs.acquireListener(), l0 -> writeUpdatedShardMetaDataAndComputeDeletes( - snapshotIds, - originalRepositoryData, - false, - l0.delegateFailure( - (l, deleteResults) -> cleanupUnlinkedShardLevelBlobs( - originalRepositoryData, - snapshotIds, - deleteResults, - l - ) - ) + l0.delegateFailure((l, shardDeleteResults) -> cleanupUnlinkedShardLevelBlobs(shardDeleteResults, l)) ) ) ); - } - }, listener::onFailure) - ); + } + }, listener::onFailure) + ); + } } - } - // --------------------------------------------------------------------------------------------------------------------------------- - // Updating the shard-level metadata and accumulating results + // --------------------------------------------------------------------------------------------------------------------------------- + // Updating the shard-level metadata and accumulating results - // updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up. - private void writeUpdatedShardMetaDataAndComputeDeletes( - Collection snapshotIds, - RepositoryData originalRepositoryData, - boolean useShardGenerations, - ActionListener> onAllShardsCompleted - ) { - - final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - final List indices = originalRepositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotIds); - - if (indices.isEmpty()) { - onAllShardsCompleted.onResponse(Collections.emptyList()); - return; - } + // updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up. 
+ private void writeUpdatedShardMetaDataAndComputeDeletes( + ActionListener> onAllShardsCompleted + ) { - // Listener that flattens out the delete results for each index - final ActionListener> deleteIndexMetadataListener = new GroupedActionListener<>( - indices.size(), - onAllShardsCompleted.map(res -> res.stream().flatMap(Collection::stream).toList()) - ); + final List indices = originalRepositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotIds); - for (IndexId indexId : indices) { - final Set snapshotsWithIndex = Set.copyOf(originalRepositoryData.getSnapshots(indexId)); - final Set survivingSnapshots = snapshotsWithIndex.stream() - .filter(id -> snapshotIds.contains(id) == false) - .collect(Collectors.toSet()); - final ListenableFuture> shardCountListener = new ListenableFuture<>(); - final Collection indexMetaGenerations = snapshotIds.stream() - .filter(snapshotsWithIndex::contains) - .map(id -> originalRepositoryData.indexMetaDataGenerations().indexMetaBlobId(id, indexId)) - .collect(Collectors.toSet()); - final ActionListener allShardCountsListener = new GroupedActionListener<>( - indexMetaGenerations.size(), - shardCountListener - ); - final BlobContainer indexContainer = indexContainer(indexId); - for (String indexMetaGeneration : indexMetaGenerations) { - executor.execute(ActionRunnable.supply(allShardCountsListener, () -> { - try { - return INDEX_METADATA_FORMAT.read(metadata.name(), indexContainer, indexMetaGeneration, namedXContentRegistry) - .getNumberOfShards(); - } catch (Exception ex) { - logger.warn( - () -> format("[%s] [%s] failed to read metadata for index", indexMetaGeneration, indexId.getName()), - ex - ); - // Just invoke the listener without any shard generations to count it down, this index will be cleaned up - // by the stale data cleanup in the end. - // TODO: Getting here means repository corruption. We should find a way of dealing with this instead of just - // ignoring it and letting the cleanup deal with it. 
- return null; - } - })); + if (indices.isEmpty()) { + onAllShardsCompleted.onResponse(Collections.emptyList()); + return; } - // ----------------------------------------------------------------------------------------------------------------------------- - // Determining the shard count - - shardCountListener.addListener(deleteIndexMetadataListener.delegateFailureAndWrap((delegate, counts) -> { - final int shardCount = counts.stream().mapToInt(i -> i).max().orElse(0); - if (shardCount == 0) { - delegate.onResponse(null); - return; - } - // Listener for collecting the results of removing the snapshot from each shard's metadata in the current index - final ActionListener allShardsListener = new GroupedActionListener<>(shardCount, delegate); - for (int i = 0; i < shardCount; i++) { - final int shardId = i; - executor.execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - final BlobContainer shardContainer = shardContainer(indexId, shardId); - final Set originalShardBlobs = shardContainer.listBlobs(OperationPurpose.SNAPSHOT).keySet(); - final BlobStoreIndexShardSnapshots blobStoreIndexShardSnapshots; - final long newGen; - if (useShardGenerations) { - newGen = -1L; - blobStoreIndexShardSnapshots = buildBlobStoreIndexShardSnapshots( - originalShardBlobs, - shardContainer, - originalRepositoryData.shardGenerations().getShardGen(indexId, shardId) - ).v1(); - } else { - Tuple tuple = buildBlobStoreIndexShardSnapshots( - originalShardBlobs, - shardContainer - ); - newGen = tuple.v2() + 1; - blobStoreIndexShardSnapshots = tuple.v1(); - } - allShardsListener.onResponse( - deleteFromShardSnapshotMeta( - survivingSnapshots, - indexId, - shardId, - snapshotIds, - shardContainer, - originalShardBlobs, - blobStoreIndexShardSnapshots, - newGen - ) - ); - } + // Listener that flattens out the delete results for each index + final ActionListener> deleteIndexMetadataListener = new GroupedActionListener<>( + indices.size(), + onAllShardsCompleted.map(res -> res.stream().flatMap(Collection::stream).toList()) + ); - @Override - public void onFailure(Exception ex) { + for (IndexId indexId : indices) { + final Set snapshotsWithIndex = Set.copyOf(originalRepositoryData.getSnapshots(indexId)); + final Set survivingSnapshots = snapshotsWithIndex.stream() + .filter(id -> snapshotIds.contains(id) == false) + .collect(Collectors.toSet()); + final ListenableFuture> shardCountListener = new ListenableFuture<>(); + final Collection indexMetaGenerations = snapshotIds.stream() + .filter(snapshotsWithIndex::contains) + .map(id -> originalRepositoryData.indexMetaDataGenerations().indexMetaBlobId(id, indexId)) + .collect(Collectors.toSet()); + final ActionListener allShardCountsListener = new GroupedActionListener<>( + indexMetaGenerations.size(), + shardCountListener + ); + final BlobContainer indexContainer = indexContainer(indexId); + for (String indexMetaGeneration : indexMetaGenerations) { + snapshotExecutor.execute(ActionRunnable.supply(allShardCountsListener, () -> { + try { + return INDEX_METADATA_FORMAT.read(metadata.name(), indexContainer, indexMetaGeneration, namedXContentRegistry) + .getNumberOfShards(); + } catch (Exception ex) { logger.warn( - () -> format("%s failed to delete shard data for shard [%s][%s]", snapshotIds, indexId.getName(), shardId), + () -> format("[%s] [%s] failed to read metadata for index", indexMetaGeneration, indexId.getName()), ex ); - // Just passing null here to count down the listener instead of failing it, the stale data left behind - // here will be 
retried in the next delete or repository cleanup - allShardsListener.onResponse(null); + // Just invoke the listener without any shard generations to count it down, this index will be cleaned up + // by the stale data cleanup in the end. + // TODO: Getting here means repository corruption. We should find a way of dealing with this instead of just + // ignoring it and letting the cleanup deal with it. + return null; } - }); + })); } - })); - } - } - // ----------------------------------------------------------------------------------------------------------------------------- - // Updating each shard + // ------------------------------------------------------------------------------------------------------------------------- + // Determining the shard count - /** - * Delete snapshot from shard level metadata. - * - * @param indexGeneration generation to write the new shard level level metadata to. If negative a uuid id shard generation should be - * used - */ - private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( - Set survivingSnapshots, - IndexId indexId, - int shardId, - Collection snapshotIds, - BlobContainer shardContainer, - Set originalShardBlobs, - BlobStoreIndexShardSnapshots snapshots, - long indexGeneration - ) { - // Build a list of snapshots that should be preserved - final BlobStoreIndexShardSnapshots updatedSnapshots = snapshots.withRetainedSnapshots(survivingSnapshots); - ShardGeneration writtenGeneration = null; - try { - if (updatedSnapshots.snapshots().isEmpty()) { - return new ShardSnapshotMetaDeleteResult(indexId, shardId, ShardGenerations.DELETED_SHARD_GEN, originalShardBlobs); - } else { - if (indexGeneration < 0L) { - writtenGeneration = ShardGeneration.newGeneration(); - INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedSnapshots, shardContainer, writtenGeneration.toBlobNamePart(), compress); + shardCountListener.addListener(deleteIndexMetadataListener.delegateFailureAndWrap((delegate, counts) -> { + final int shardCount = counts.stream().mapToInt(i -> i).max().orElse(0); + if (shardCount == 0) { + delegate.onResponse(null); + return; + } + // Listener for collecting the results of removing the snapshot from each shard's metadata in the current index + final ActionListener allShardsListener = new GroupedActionListener<>( + shardCount, + delegate + ); + for (int i = 0; i < shardCount; i++) { + final int shardId = i; + snapshotExecutor.execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + final BlobContainer shardContainer = shardContainer(indexId, shardId); + final Set originalShardBlobs = shardContainer.listBlobs(OperationPurpose.SNAPSHOT).keySet(); + final BlobStoreIndexShardSnapshots blobStoreIndexShardSnapshots; + final long newGen; + if (useShardGenerations) { + newGen = -1L; + blobStoreIndexShardSnapshots = buildBlobStoreIndexShardSnapshots( + originalShardBlobs, + shardContainer, + originalRepositoryData.shardGenerations().getShardGen(indexId, shardId) + ).v1(); + } else { + Tuple tuple = buildBlobStoreIndexShardSnapshots( + originalShardBlobs, + shardContainer + ); + newGen = tuple.v2() + 1; + blobStoreIndexShardSnapshots = tuple.v1(); + } + allShardsListener.onResponse( + deleteFromShardSnapshotMeta( + survivingSnapshots, + indexId, + shardId, + snapshotIds, + shardContainer, + originalShardBlobs, + blobStoreIndexShardSnapshots, + newGen + ) + ); + } + + @Override + public void onFailure(Exception ex) { + logger.warn( + () -> format( + "%s failed to delete shard data for shard [%s][%s]", + snapshotIds, + 
indexId.getName(), + shardId + ), + ex + ); + // Just passing null here to count down the listener instead of failing it, the stale data left behind + // here will be retried in the next delete or repository cleanup + allShardsListener.onResponse(null); + } + }); + } + })); + } + } + + // ----------------------------------------------------------------------------------------------------------------------------- + // Updating each shard + + /** + * Delete snapshot from shard level metadata. + * + * @param indexGeneration generation to write the new shard level level metadata to. If negative a uuid id shard generation should + * be used + */ + private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( + Set survivingSnapshots, + IndexId indexId, + int shardId, + Collection snapshotIds, + BlobContainer shardContainer, + Set originalShardBlobs, + BlobStoreIndexShardSnapshots snapshots, + long indexGeneration + ) { + // Build a list of snapshots that should be preserved + final BlobStoreIndexShardSnapshots updatedSnapshots = snapshots.withRetainedSnapshots(survivingSnapshots); + ShardGeneration writtenGeneration = null; + try { + if (updatedSnapshots.snapshots().isEmpty()) { + return new ShardSnapshotMetaDeleteResult(indexId, shardId, ShardGenerations.DELETED_SHARD_GEN, originalShardBlobs); } else { - writtenGeneration = new ShardGeneration(indexGeneration); - writeShardIndexBlobAtomic(shardContainer, indexGeneration, updatedSnapshots, Collections.emptyMap()); + if (indexGeneration < 0L) { + writtenGeneration = ShardGeneration.newGeneration(); + INDEX_SHARD_SNAPSHOTS_FORMAT.write(updatedSnapshots, shardContainer, writtenGeneration.toBlobNamePart(), compress); + } else { + writtenGeneration = new ShardGeneration(indexGeneration); + writeShardIndexBlobAtomic(shardContainer, indexGeneration, updatedSnapshots, Collections.emptyMap()); + } + final Set survivingSnapshotUUIDs = survivingSnapshots.stream() + .map(SnapshotId::getUUID) + .collect(Collectors.toSet()); + return new ShardSnapshotMetaDeleteResult( + indexId, + shardId, + writtenGeneration, + unusedBlobs(originalShardBlobs, survivingSnapshotUUIDs, updatedSnapshots) + ); } - final Set survivingSnapshotUUIDs = survivingSnapshots.stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); - return new ShardSnapshotMetaDeleteResult( - indexId, - shardId, - writtenGeneration, - unusedBlobs(originalShardBlobs, survivingSnapshotUUIDs, updatedSnapshots) + } catch (IOException e) { + throw new RepositoryException( + metadata.name(), + "Failed to finalize snapshot deletion " + + snapshotIds + + " with shard index [" + + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(writtenGeneration.toBlobNamePart()) + + "]", + e ); } - } catch (IOException e) { - throw new RepositoryException( - metadata.name(), - "Failed to finalize snapshot deletion " - + snapshotIds - + " with shard index [" - + INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(writtenGeneration.toBlobNamePart()) - + "]", - e - ); } - } - // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all - // temporary blobs - private static List unusedBlobs( - Set originalShardBlobs, - Set survivingSnapshotUUIDs, - BlobStoreIndexShardSnapshots updatedSnapshots - ) { - return originalShardBlobs.stream() - .filter( - blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX) - || (blob.startsWith(SNAPSHOT_PREFIX) - && blob.endsWith(".dat") - && survivingSnapshotUUIDs.contains( - blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) - ) == 
false) - || (blob.startsWith(UPLOADED_DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) - || FsBlobContainer.isTempBlobName(blob) - ) - .toList(); - } - - // --------------------------------------------------------------------------------------------------------------------------------- - // Cleaning up dangling blobs + // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all + // temporary blobs + private static List unusedBlobs( + Set originalShardBlobs, + Set survivingSnapshotUUIDs, + BlobStoreIndexShardSnapshots updatedSnapshots + ) { + return originalShardBlobs.stream() + .filter( + blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX) + || (blob.startsWith(SNAPSHOT_PREFIX) + && blob.endsWith(".dat") + && survivingSnapshotUUIDs.contains( + blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()) + ) == false) + || (blob.startsWith(UPLOADED_DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) + || FsBlobContainer.isTempBlobName(blob) + ) + .toList(); + } - /** - * Delete any dangling blobs in the repository root (i.e. {@link RepositoryData}, {@link SnapshotInfo} and {@link Metadata} blobs) - * as well as any containers for indices that are now completely unreferenced. - */ - private void cleanupUnlinkedRootAndIndicesBlobs( - Collection snapshotIds, - Map originalIndexContainers, - Map originalRootBlobs, - RepositoryData newRepositoryData, - ActionListener listener - ) { - cleanupStaleBlobs(snapshotIds, originalIndexContainers, originalRootBlobs, newRepositoryData, listener.map(ignored -> null)); - } + // --------------------------------------------------------------------------------------------------------------------------------- + // Cleaning up dangling blobs - private void cleanupUnlinkedShardLevelBlobs( - RepositoryData originalRepositoryData, - Collection snapshotIds, - Collection shardDeleteResults, - ActionListener listener - ) { - final Iterator filesToDelete = resolveFilesToDelete(originalRepositoryData, snapshotIds, shardDeleteResults); - if (filesToDelete.hasNext() == false) { - listener.onResponse(null); - return; + /** + * Delete any dangling blobs in the repository root (i.e. {@link RepositoryData}, {@link SnapshotInfo} and {@link Metadata} blobs) + * as well as any containers for indices that are now completely unreferenced. 
+ */ + private void cleanupUnlinkedRootAndIndicesBlobs(RepositoryData newRepositoryData, ActionListener listener) { + cleanupStaleBlobs(snapshotIds, originalIndexContainers, originalRootBlobs, newRepositoryData, listener.map(ignored -> null)); } - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { - try { - deleteFromContainer(blobContainer(), filesToDelete); - l.onResponse(null); - } catch (Exception e) { - logger.warn(() -> format("%s Failed to delete some blobs during snapshot delete", snapshotIds), e); - throw e; + + private void cleanupUnlinkedShardLevelBlobs( + Collection shardDeleteResults, + ActionListener listener + ) { + final Iterator filesToDelete = resolveFilesToDelete(shardDeleteResults); + if (filesToDelete.hasNext() == false) { + listener.onResponse(null); + return; } - })); - } + snapshotExecutor.execute(ActionRunnable.wrap(listener, l -> { + try { + deleteFromContainer(blobContainer(), filesToDelete); + l.onResponse(null); + } catch (Exception e) { + logger.warn(() -> format("%s Failed to delete some blobs during snapshot delete", snapshotIds), e); + throw e; + } + })); + } - private Iterator resolveFilesToDelete( - RepositoryData oldRepositoryData, - Collection snapshotIds, - Collection deleteResults - ) { - final String basePath = basePath().buildAsString(); - final int basePathLen = basePath.length(); - final Map> indexMetaGenerations = oldRepositoryData.indexMetaDataToRemoveAfterRemovingSnapshots( - snapshotIds - ); - return Stream.concat(deleteResults.stream().flatMap(shardResult -> { - final String shardPath = shardPath(shardResult.indexId, shardResult.shardId).buildAsString(); - return shardResult.blobsToDelete.stream().map(blob -> shardPath + blob); - }), indexMetaGenerations.entrySet().stream().flatMap(entry -> { - final String indexContainerPath = indexPath(entry.getKey()).buildAsString(); - return entry.getValue().stream().map(id -> indexContainerPath + INDEX_METADATA_FORMAT.blobName(id)); - })).map(absolutePath -> { - assert absolutePath.startsWith(basePath); - return absolutePath.substring(basePathLen); - }).iterator(); + private Iterator resolveFilesToDelete(Collection deleteResults) { + final String basePath = basePath().buildAsString(); + final int basePathLen = basePath.length(); + final Map> indexMetaGenerations = originalRepositoryData + .indexMetaDataToRemoveAfterRemovingSnapshots(snapshotIds); + return Stream.concat(deleteResults.stream().flatMap(shardResult -> { + final String shardPath = shardPath(shardResult.indexId, shardResult.shardId).buildAsString(); + return shardResult.blobsToDelete.stream().map(blob -> shardPath + blob); + }), indexMetaGenerations.entrySet().stream().flatMap(entry -> { + final String indexContainerPath = indexPath(entry.getKey()).buildAsString(); + return entry.getValue().stream().map(id -> indexContainerPath + INDEX_METADATA_FORMAT.blobName(id)); + })).map(absolutePath -> { + assert absolutePath.startsWith(basePath); + return absolutePath.substring(basePathLen); + }).iterator(); + } } /** From 3ea97796eb9a4c150cbbdfd59278005cb1c9a613 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 11 Oct 2023 07:05:13 +0100 Subject: [PATCH 145/176] Rename args in cleanup process to match deletion process (#100620) Relates #100568 --- .../blobstore/BlobStoreRepository.java | 111 ++++++++++-------- 1 file changed, 61 insertions(+), 50 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java 
b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 98d725b9d1367..1e2969b255877 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -785,16 +785,16 @@ public RepositoryStats stats() { /** * Loads {@link RepositoryData} ensuring that it is consistent with the given {@code rootBlobs} as well of the assumed generation. * - * @param repositoryStateId Expected repository generation - * @param rootBlobs Blobs at the repository root + * @param repositoryDataGeneration Expected repository generation + * @param rootBlobs Blobs at the repository root * @return RepositoryData */ - private RepositoryData safeRepositoryData(long repositoryStateId, Map rootBlobs) { + private RepositoryData safeRepositoryData(long repositoryDataGeneration, Map rootBlobs) { final long generation = latestGeneration(rootBlobs.keySet()); final long genToLoad; final RepositoryData cached; if (bestEffortConsistency) { - genToLoad = latestKnownRepoGen.accumulateAndGet(repositoryStateId, Math::max); + genToLoad = latestKnownRepoGen.accumulateAndGet(repositoryDataGeneration, Math::max); cached = null; } else { genToLoad = latestKnownRepoGen.get(); @@ -813,11 +813,11 @@ private RepositoryData safeRepositoryData(long repositoryStateId, Map resolveFilesToDelete(Collection deletedSnapshots, - Map foundIndices, - Map rootBlobs, - RepositoryData newRepoData, + Collection snapshotIds, + Map originalIndexContainers, + Map originalRootBlobs, + RepositoryData newRepositoryData, ActionListener listener ) { final var blobsDeleted = new AtomicLong(); final var bytesDeleted = new AtomicLong(); try (var listeners = new RefCountingListener(listener.map(ignored -> DeleteResult.of(blobsDeleted.get(), bytesDeleted.get())))) { - final List staleRootBlobs = staleRootBlobs(newRepoData, rootBlobs.keySet()); + final List staleRootBlobs = staleRootBlobs(newRepositoryData, originalRootBlobs.keySet()); if (staleRootBlobs.isEmpty() == false) { staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { try (ref) { - logStaleRootLevelBlobs(newRepoData.getGenId() - 1, deletedSnapshots, staleRootBlobs); + logStaleRootLevelBlobs(newRepositoryData.getGenId() - 1, snapshotIds, staleRootBlobs); deleteFromContainer(blobContainer(), staleRootBlobs.iterator()); for (final var staleRootBlob : staleRootBlobs) { - bytesDeleted.addAndGet(rootBlobs.get(staleRootBlob).length()); + bytesDeleted.addAndGet(originalRootBlobs.get(staleRootBlob).length()); } blobsDeleted.addAndGet(staleRootBlobs.size()); } catch (Exception e) { @@ -1343,23 +1342,23 @@ private void cleanupStaleBlobs( })); } - final var survivingIndexIds = newRepoData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); - for (final var indexEntry : foundIndices.entrySet()) { - final var indexSnId = indexEntry.getKey(); - if (survivingIndexIds.contains(indexSnId)) { + final var survivingIndexIds = newRepositoryData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); + for (final var indexEntry : originalIndexContainers.entrySet()) { + final var indexId = indexEntry.getKey(); + if (survivingIndexIds.contains(indexId)) { continue; } staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { try (ref) { - logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); + logger.debug("[{}] Found stale index [{}]. 
Cleaning it up", metadata.name(), indexId); final var deleteResult = indexEntry.getValue().delete(OperationPurpose.SNAPSHOT); blobsDeleted.addAndGet(deleteResult.blobsDeleted()); bytesDeleted.addAndGet(deleteResult.bytesDeleted()); - logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); + logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexId); } catch (IOException e) { logger.warn(() -> format(""" [%s] index %s is no longer part of any snapshot in the repository, \ - but failed to clean up its index folder""", metadata.name(), indexSnId), e); + but failed to clean up its index folder""", metadata.name(), indexId), e); } })); } @@ -1386,40 +1385,45 @@ private void cleanupStaleBlobs( *
<ul> * <li>Deleting stale indices</li> * <li>Deleting unreferenced root level blobs</li> * </ul>
- * @param repositoryStateId Current repository state id - * @param repositoryMetaVersion version of the updated repository metadata to write + * @param originalRepositoryDataGeneration Current repository state id + * @param repositoryFormatIndexVersion version of the updated repository metadata to write * @param listener Listener to complete when done */ - public void cleanup(long repositoryStateId, IndexVersion repositoryMetaVersion, ActionListener listener) { + public void cleanup( + long originalRepositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, + ActionListener listener + ) { try { if (isReadOnly()) { throw new RepositoryException(metadata.name(), "cannot run cleanup on readonly repository"); } - Map rootBlobs = blobContainer().listBlobs(OperationPurpose.SNAPSHOT); - final RepositoryData repositoryData = safeRepositoryData(repositoryStateId, rootBlobs); - final Map foundIndices = blobStore().blobContainer(indicesPath()).children(OperationPurpose.SNAPSHOT); - final Set survivingIndexIds = repositoryData.getIndices() + Map originalRootBlobs = blobContainer().listBlobs(OperationPurpose.SNAPSHOT); + final RepositoryData originalRepositoryData = safeRepositoryData(originalRepositoryDataGeneration, originalRootBlobs); + final Map originalIndexContainers = blobStore().blobContainer(indicesPath()) + .children(OperationPurpose.SNAPSHOT); + final Set survivingIndexIds = originalRepositoryData.getIndices() .values() .stream() .map(IndexId::getId) .collect(Collectors.toSet()); - final List staleRootBlobs = staleRootBlobs(repositoryData, rootBlobs.keySet()); - if (survivingIndexIds.equals(foundIndices.keySet()) && staleRootBlobs.isEmpty()) { + final List staleRootBlobs = staleRootBlobs(originalRepositoryData, originalRootBlobs.keySet()); + if (survivingIndexIds.equals(originalIndexContainers.keySet()) && staleRootBlobs.isEmpty()) { // Nothing to clean up we return listener.onResponse(new RepositoryCleanupResult(DeleteResult.ZERO)); } else { // write new index-N blob to ensure concurrent operations will fail writeIndexGen( - repositoryData, - repositoryStateId, - repositoryMetaVersion, + originalRepositoryData, + originalRepositoryDataGeneration, + repositoryFormatIndexVersion, Function.identity(), listener.delegateFailureAndWrap( (l, v) -> cleanupStaleBlobs( Collections.emptyList(), - foundIndices, - rootBlobs, - repositoryData, + originalIndexContainers, + originalRootBlobs, + originalRepositoryData, l.map(RepositoryCleanupResult::new) ) ) @@ -1431,9 +1435,12 @@ public void cleanup(long repositoryStateId, IndexVersion repositoryMetaVersion, } // Finds all blobs directly under the repository root path that are not referenced by the current RepositoryData - private static List staleRootBlobs(RepositoryData repositoryData, Set rootBlobNames) { - final Set allSnapshotIds = repositoryData.getSnapshotIds().stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); - return rootBlobNames.stream().filter(blob -> { + private static List staleRootBlobs(RepositoryData originalRepositoryData, Set originalRootBlobNames) { + final Set allSnapshotIds = originalRepositoryData.getSnapshotIds() + .stream() + .map(SnapshotId::getUUID) + .collect(Collectors.toSet()); + return originalRootBlobNames.stream().filter(blob -> { if (FsBlobContainer.isTempBlobName(blob)) { return true; } @@ -1452,7 +1459,7 @@ private static List staleRootBlobs(RepositoryData repositoryData, Set Long.parseLong(blob.substring(INDEX_FILE_PREFIX.length())); + return originalRepositoryData.getGenId() >
Long.parseLong(blob.substring(INDEX_FILE_PREFIX.length())); } catch (NumberFormatException nfe) { // odd case of an extra file with the index- prefix that we can't identify return false; @@ -1462,17 +1469,21 @@ private static List staleRootBlobs(RepositoryData repositoryData, Set deletedSnapshots, List blobsToDelete) { + private void logStaleRootLevelBlobs( + long originalRepositoryDataGeneration, + Collection snapshotIds, + List blobsToDelete + ) { if (logger.isInfoEnabled()) { // If we're running root level cleanup as part of a snapshot delete we should not log the snapshot- and global metadata // blobs associated with the just deleted snapshots as they are expected to exist and not stale. Otherwise every snapshot // delete would also log a confusing INFO message about "stale blobs". - final Set blobNamesToIgnore = deletedSnapshots.stream() + final Set blobNamesToIgnore = snapshotIds.stream() .flatMap( snapshotId -> Stream.of( GLOBAL_METADATA_FORMAT.blobName(snapshotId.getUUID()), SNAPSHOT_FORMAT.blobName(snapshotId.getUUID()), - INDEX_FILE_PREFIX + previousGeneration + INDEX_FILE_PREFIX + originalRepositoryDataGeneration ) ) .collect(Collectors.toSet()); From 5cc90094e9b24203e92649b1470736e984f59e10 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 10 Oct 2023 23:09:08 -0700 Subject: [PATCH 146/176] Relax ValueSources check in OrdinalsGroupingOperator (#100566) ValuesSource can be Null instead of Bytes when a shard has no data for a specific field. This PR relaxes the check for ValueSources in the OrdinalsGroupingOperator. We will need to add more tests for OrdinalsGroupingOperator. Closes #100438 --- .../operator/OrdinalsGroupingOperator.java | 6 ---- .../xpack/esql/action/EsqlActionIT.java | 34 +++++++++++++++++++ 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 4dab7faa2a074..7c930118903cf 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -105,12 +105,6 @@ public OrdinalsGroupingOperator( DriverContext driverContext ) { Objects.requireNonNull(aggregatorFactories); - boolean bytesValues = sources.get(0).source() instanceof ValuesSource.Bytes; - for (int i = 1; i < sources.size(); i++) { - if (sources.get(i).source() instanceof ValuesSource.Bytes != bytesValues) { - throw new IllegalStateException("ValuesSources are mismatched"); - } - } this.sources = sources; this.docChannel = docChannel; this.groupingField = groupingField; diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index f10ca17d741d8..0017a8600a013 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -35,6 +35,7 @@ import java.util.Arrays; import java.util.Comparator; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -1187,6 +1188,39 @@ public void testGroupingMultiValueByOrdinals() { } } + public 
void testUnsupportedTypesOrdinalGrouping() { + assertAcked( + client().admin().indices().prepareCreate("index-1").setMapping("f1", "type=keyword", "f2", "type=keyword", "v", "type=long") + ); + assertAcked( + client().admin().indices().prepareCreate("index-2").setMapping("f1", "type=object", "f2", "type=keyword", "v", "type=long") + ); + Map groups = new HashMap<>(); + int numDocs = randomIntBetween(10, 20); + for (int i = 0; i < numDocs; i++) { + String k = randomFrom("a", "b", "c"); + long v = randomIntBetween(1, 10); + groups.merge(k, v, Long::sum); + groups.merge(null, v, Long::sum); // null group + client().prepareIndex("index-1").setSource("f1", k, "v", v).get(); + client().prepareIndex("index-2").setSource("f2", k, "v", v).get(); + } + client().admin().indices().prepareRefresh("index-1", "index-2").get(); + for (String field : List.of("f1", "f2")) { + try (var resp = run("from index-1,index-2 | stats sum(v) by " + field)) { + Iterator> values = resp.values(); + Map actual = new HashMap<>(); + while (values.hasNext()) { + Iterator row = values.next(); + Long v = (Long) row.next(); + String k = (String) row.next(); + actual.put(k, v); + } + assertThat(actual, equalTo(groups)); + } + } + } + private void createNestedMappingIndex(String indexName) throws IOException { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject();
From e5a1cd8cbd4e4255fc6244e6a610bf3ff66919a2 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 11 Oct 2023 09:16:03 +0200 Subject: [PATCH 147/176] Unmute and fix IdLoaderTests#testSynthesizeIdMultipleSegments again. (#100625)
This time more segments were created than expected because IWC#maxBufferedDocs was randomly set to a very small number. I also ran this test again with -Dtests.iters=1024 without failure. So hopefully this test will not fail again because of random test issues.
Closes #100580 --- .../test/java/org/elasticsearch/index/mapper/IdLoaderTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java index b22d4269c7891..6712d1c40b4ee 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java @@ -69,7 +69,6 @@ public void testSynthesizeIdSimple() throws Exception { prepareIndexReader(indexAndForceMerge(routing, docs), verify, false); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100580") public void testSynthesizeIdMultipleSegments() throws Exception { var routingPaths = List.of("dim1"); var routing = createRouting(routingPaths); @@ -203,6 +202,7 @@ private void prepareIndexReader( IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random())); if (noMergePolicy) { config.setMergePolicy(NoMergePolicy.INSTANCE); + config.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); } Sort sort = new Sort( new SortField(TimeSeriesIdFieldMapper.NAME, SortField.Type.STRING, false),
From 370c8266b15f76400ada22833e557dc606d3ec3e Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Wed, 11 Oct 2023 08:18:04 +0100 Subject: [PATCH 148/176] DSL waits for the tsdb time boundaries to lapse (#100470)
TSDB indices are expected to receive large amounts of writes whilst their time bounds are "active" (i.e. they include `now`).
This ensures TSDB doesn't execute any ingest disruptive operations (like delete, forcemerge, downsample) until the `end_time` for the TSDS backing indices has lapsed. --- docs/changelog/100470.yaml | 6 + .../lifecycle/DataStreamLifecycleService.java | 86 ++++++++--- .../DataStreamLifecycleServiceTests.java | 110 +++++++++++++ ...StreamLifecycleDownsampleDisruptionIT.java | 14 +- .../DataStreamLifecycleDownsampleIT.java | 42 ++++- .../downsample/DataStreamLifecycleDriver.java | 38 ++++- ...StreamLifecycleDownsamplingSecurityIT.java | 144 +++++++++++++----- 7 files changed, 368 insertions(+), 72 deletions(-) create mode 100644 docs/changelog/100470.yaml diff --git a/docs/changelog/100470.yaml b/docs/changelog/100470.yaml new file mode 100644 index 0000000000000..3408ae06f7fe9 --- /dev/null +++ b/docs/changelog/100470.yaml @@ -0,0 +1,6 @@ +pr: 100470 +summary: DSL waits for the tsdb time boundaries to lapse +area: Data streams +type: bug +issues: + - 99696 diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 5d85a199c4e3d..d1ea1b589b5a5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -63,7 +63,9 @@ import org.elasticsearch.datastreams.lifecycle.downsampling.DeleteSourceAndAddDownsampleToDS; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.snapshots.SnapshotInProgressException; import org.elasticsearch.threadpool.ThreadPool; @@ -71,6 +73,7 @@ import java.io.Closeable; import java.time.Clock; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -140,7 +143,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab private final ThreadPool threadPool; final ResultDeduplicator transportActionsDeduplicator; final ResultDeduplicator clusterStateChangesDeduplicator; - private final LongSupplier nowSupplier; + private LongSupplier nowSupplier; private final Clock clock; private final DataStreamLifecycleErrorStore errorStore; private volatile boolean isMaster = false; @@ -304,11 +307,24 @@ void run(ClusterState state) { } } } - Set indicesBeingRemoved; + + Set indicesToExcludeForRemainingRun = new HashSet<>(); + // the following indices should not be considered for the remainder of this service run: + // 1) the write index as it's still getting writes and we'll have to roll it over when the conditions are met + // 2) tsds indices that are still within their time bounds (i.e. 
now < time_series.end_time) - we don't want these indices to be + // deleted, forcemerged, or downsampled as they're still expected to receive large amounts of writes + indicesToExcludeForRemainingRun.add(currentRunWriteIndex); + indicesToExcludeForRemainingRun.addAll( + timeSeriesIndicesStillWithinTimeBounds( + state.metadata(), + getTargetIndices(dataStream, indicesToExcludeForRemainingRun, state.metadata()::index), + nowSupplier + ) + ); + try { - indicesBeingRemoved = maybeExecuteRetention(state, dataStream); + indicesToExcludeForRemainingRun.addAll(maybeExecuteRetention(state, dataStream, indicesToExcludeForRemainingRun)); } catch (Exception e) { - indicesBeingRemoved = Set.of(); // individual index errors would be reported via the API action listener for every delete call // we could potentially record errors at a data stream level and expose it via the _data_stream API? logger.error( @@ -321,13 +337,6 @@ void run(ClusterState state) { ); } - // the following indices should not be considered for the remainder of this service run: - // 1) the write index as it's still getting writes and we'll have to roll it over when the conditions are met - // 2) we exclude any indices that we're in the process of deleting because they'll be gone soon anyway - Set indicesToExcludeForRemainingRun = new HashSet<>(); - indicesToExcludeForRemainingRun.add(currentRunWriteIndex); - indicesToExcludeForRemainingRun.addAll(indicesBeingRemoved); - try { indicesToExcludeForRemainingRun.addAll( maybeExecuteForceMerge(state, getTargetIndices(dataStream, indicesToExcludeForRemainingRun, state.metadata()::index)) @@ -372,6 +381,30 @@ void run(ClusterState state) { ); } + // visible for testing + static Set timeSeriesIndicesStillWithinTimeBounds(Metadata metadata, List targetIndices, LongSupplier nowSupplier) { + Set tsIndicesWithinBounds = new HashSet<>(); + for (Index index : targetIndices) { + IndexMetadata backingIndex = metadata.index(index); + assert backingIndex != null : "the data stream backing indices must exist"; + if (IndexSettings.MODE.get(backingIndex.getSettings()) == IndexMode.TIME_SERIES) { + Instant configuredEndTime = IndexSettings.TIME_SERIES_END_TIME.get(backingIndex.getSettings()); + assert configuredEndTime != null + : "a time series index must have an end time configured but [" + index.getName() + "] does not"; + if (nowSupplier.getAsLong() <= configuredEndTime.toEpochMilli()) { + logger.trace( + "Data stream lifecycle will not perform any operations in this run on time series index [{}] because " + + "its configured [{}] end time has not lapsed", + index.getName(), + configuredEndTime + ); + tsIndicesWithinBounds.add(index); + } + } + } + return tsIndicesWithinBounds; + } + /** * Data stream lifecycle supports configuring multiple rounds of downsampling for each managed index. When attempting to execute * downsampling we iterate through the ordered rounds of downsampling that match an index (ordered ascending according to the `after` @@ -716,11 +749,13 @@ private void maybeExecuteRollover(ClusterState state, DataStream dataStream) { /** * This method sends requests to delete any indices in the datastream that exceed its retention policy. It returns the set of indices * it has sent delete requests for. 
- * @param state The cluster state from which to get index metadata - * @param dataStream The datastream + * + * @param state The cluster state from which to get index metadata + * @param dataStream The datastream + * @param indicesToExcludeForRemainingRun Indices to exclude from retention even if it would be time for them to be deleted * @return The set of indices that delete requests have been sent for */ - private Set maybeExecuteRetention(ClusterState state, DataStream dataStream) { + private Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { TimeValue retention = getRetentionConfiguration(dataStream); Set indicesToBeRemoved = new HashSet<>(); if (retention != null) { @@ -728,14 +763,16 @@ private Set maybeExecuteRetention(ClusterState state, DataStream dataStre List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier); for (Index index : backingIndicesOlderThanRetention) { - indicesToBeRemoved.add(index); - IndexMetadata backingIndex = metadata.index(index); - assert backingIndex != null : "the data stream backing indices must exist"; - - // there's an opportunity here to batch the delete requests (i.e. delete 100 indices / request) - // let's start simple and reevaluate - String indexName = backingIndex.getIndex().getName(); - deleteIndexOnce(indexName, "the lapsed [" + retention + "] retention period"); + if (indicesToExcludeForRemainingRun.contains(index) == false) { + indicesToBeRemoved.add(index); + IndexMetadata backingIndex = metadata.index(index); + assert backingIndex != null : "the data stream backing indices must exist"; + + // there's an opportunity here to batch the delete requests (i.e. delete 100 indices / request) + // let's start simple and reevaluate + String indexName = backingIndex.getIndex().getName(); + deleteIndexOnce(indexName, "the lapsed [" + retention + "] retention period"); + } } } return indicesToBeRemoved; @@ -1227,6 +1264,11 @@ public DataStreamLifecycleErrorStore getErrorStore() { return errorStore; } + // visible for testing + public void setNowSupplier(LongSupplier nowSupplier) { + this.nowSupplier = nowSupplier; + } + /** * This is a ClusterStateTaskListener that writes the force_merge_completed_timestamp into the cluster state. It is meant to run in * STATE_UPDATE_TASK_EXECUTOR. 
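Condensed, the gate this patch adds to the service boils down to one check per backing index: a time series index is exempt from delete, forcemerge and downsample while its configured end_time has not lapsed. A minimal sketch, assuming the types already imported in the file above (the method name `stillWithinTimeBounds` is illustrative; the patch's actual implementation is `timeSeriesIndicesStillWithinTimeBounds`):

    // Sketch: true means DSL must skip destructive steps for this index in the current run.
    static boolean stillWithinTimeBounds(IndexMetadata backingIndex, LongSupplier nowSupplier) {
        if (IndexSettings.MODE.get(backingIndex.getSettings()) != IndexMode.TIME_SERIES) {
            return false; // only time series indices carry time bounds
        }
        // a time series index always has index.time_series.end_time configured
        Instant configuredEndTime = IndexSettings.TIME_SERIES_END_TIME.get(backingIndex.getSettings());
        return nowSupplier.getAsLong() <= configuredEndTime.toEpochMilli();
    }

The `nowSupplier` indirection is what makes the new `setNowSupplier` test hook above possible: tests can move "now" past the configured end_time instead of waiting for it.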
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index b1679b5fa6701..f1e74a936e781 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling; import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling.Round; +import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -59,6 +60,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -80,6 +82,7 @@ import java.time.Clock; import java.time.Instant; import java.time.ZoneId; +import java.time.temporal.ChronoUnit; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -105,6 +108,7 @@ import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.TARGET_MERGE_FACTOR_VALUE; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -246,6 +250,49 @@ public void testRetentionNotExecutedDueToAge() { assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); } + public void testRetentionNotExecutedForTSIndicesWithinTimeBounds() { + Instant currentTime = Instant.now().truncatedTo(ChronoUnit.MILLIS); + // These ranges are on the edge of each other's temporal boundaries.
+ Instant start1 = currentTime.minus(6, ChronoUnit.HOURS); + Instant end1 = currentTime.minus(4, ChronoUnit.HOURS); + Instant start2 = currentTime.minus(4, ChronoUnit.HOURS); + Instant end2 = currentTime.plus(2, ChronoUnit.HOURS); + Instant start3 = currentTime.plus(2, ChronoUnit.HOURS); + Instant end3 = currentTime.plus(4, ChronoUnit.HOURS); + + String dataStreamName = "logs_my-app_prod"; + var clusterState = DataStreamTestHelper.getClusterStateWithDataStream( + dataStreamName, + List.of(Tuple.tuple(start1, end1), Tuple.tuple(start2, end2), Tuple.tuple(start3, end3)) + ); + Metadata.Builder builder = Metadata.builder(clusterState.metadata()); + DataStream dataStream = builder.dataStream(dataStreamName); + builder.put( + new DataStream( + dataStreamName, + dataStream.getIndices(), + dataStream.getGeneration() + 1, + dataStream.getMetadata(), + dataStream.isHidden(), + dataStream.isReplicated(), + dataStream.isSystem(), + dataStream.isAllowCustomRouting(), + dataStream.getIndexMode(), + DataStreamLifecycle.newBuilder().dataRetention(0L).build() + ) + ); + clusterState = ClusterState.builder(clusterState).metadata(builder).build(); + + dataStreamLifecycleService.run(clusterState); + assertThat(clientSeenRequests.size(), is(2)); // rollover the write index and one delete request for the index that's out of the + // TS time bounds + assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class)); + TransportRequest deleteIndexRequest = clientSeenRequests.get(1); + assertThat(deleteIndexRequest, instanceOf(DeleteIndexRequest.class)); + // only the first generation index should be eligible for retention + assertThat(((DeleteIndexRequest) deleteIndexRequest).indices(), is(new String[] { dataStream.getIndices().get(0).getName() })); + } + public void testIlmManagedIndicesAreSkipped() { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); int numBackingIndices = 3; @@ -1186,6 +1233,69 @@ public void testDownsamplingWhenTargetIndexNameClashYieldsException() throws Exc assertThat(error, containsString("resource_already_exists_exception")); } + public void testTimeSeriesIndicesStillWithinTimeBounds() { + Instant currentTime = Instant.now().truncatedTo(ChronoUnit.MILLIS); + // These ranges are on the edge of each other's temporal boundaries.
+ Instant start1 = currentTime.minus(6, ChronoUnit.HOURS); + Instant end1 = currentTime.minus(4, ChronoUnit.HOURS); + Instant start2 = currentTime.minus(4, ChronoUnit.HOURS); + Instant end2 = currentTime.plus(2, ChronoUnit.HOURS); + Instant start3 = currentTime.plus(2, ChronoUnit.HOURS); + Instant end3 = currentTime.plus(4, ChronoUnit.HOURS); + + String dataStreamName = "logs_my-app_prod"; + var clusterState = DataStreamTestHelper.getClusterStateWithDataStream( + dataStreamName, + List.of(Tuple.tuple(start1, end1), Tuple.tuple(start2, end2), Tuple.tuple(start3, end3)) + ); + DataStream dataStream = clusterState.getMetadata().dataStreams().get(dataStreamName); + + { + // test for an index for which `now` is outside its time bounds + Index firstGenIndex = dataStream.getIndices().get(0); + Set indices = DataStreamLifecycleService.timeSeriesIndicesStillWithinTimeBounds( + clusterState.metadata(), + // the end_time for the first generation has lapsed + List.of(firstGenIndex), + currentTime::toEpochMilli + ); + assertThat(indices.size(), is(0)); + } + + { + Set indices = DataStreamLifecycleService.timeSeriesIndicesStillWithinTimeBounds( + clusterState.metadata(), + // the end_time for the first generation has lapsed, but the other 2 generations are still within bounds + dataStream.getIndices(), + currentTime::toEpochMilli + ); + assertThat(indices.size(), is(2)); + assertThat(indices, containsInAnyOrder(dataStream.getIndices().get(1), dataStream.getIndices().get(2))); + } + + { + // non time_series indices are not within time bounds (they don't have any) + IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30)) + .settings( + Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) + .build() + ) + .build(); + + Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build(); + + Set indices = DataStreamLifecycleService.timeSeriesIndicesStillWithinTimeBounds( + newMetadata, + List.of(indexMeta.getIndex()), + currentTime::toEpochMilli + ); + assertThat(indices.size(), is(0)); + } + } + /* * Creates a test cluster state with the given indexName. If customDataStreamLifecycleMetadata is not null, it is added as the value * of the index's custom metadata named "data_stream_lifecycle". 
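The downsample integration tests below pin the tsdb boundaries explicitly so this gate behaves deterministically. A sketch of the settings the updated `putTSDBIndexTemplate` helper applies (values copied from the tests; an illustration, not a new API):

    // Sketch: a time-bounded TSDB template; the bounds are wide enough to cover every test document.
    Settings.Builder settings = indexSettings(1, 0)
        .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
        .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1))
        .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "1986-01-08T23:40:53.384Z")
        .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2022-01-08T23:40:53.384Z");

Passing null for either boundary leaves the corresponding setting unset, which is why the tests re-put the template with null bounds just before rolling over: DSL then no longer has to wait for the end_time to lapse.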
diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java index 166f41fa063ca..5bd20ce51a57d 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java @@ -35,6 +35,7 @@ import java.util.function.Consumer; import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.getBackingIndices; +import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.putTSDBIndexTemplate; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -77,8 +78,19 @@ public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception { ) ) .build(); - int indexedDocs = DataStreamLifecycleDriver.setupDataStreamAndIngestDocs(client(), dataStreamName, lifecycle, DOC_COUNT); + DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs( + client(), + dataStreamName, + "1986-01-08T23:40:53.384Z", + "2022-01-08T23:40:53.384Z", + lifecycle, + DOC_COUNT, + "1990-09-09T18:00:00" + ); + // before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with + // testing so DSL doesn't have to wait for the end_time to lapse) + putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle); client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); // DSL runs every second and it has to tail forcemerge the index (2 seconds) and mark it as read-only (2s) before it starts diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java index cf5e79982d836..c38ed182abc64 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java @@ -32,6 +32,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.getBackingIndices; +import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.putTSDBIndexTemplate; import static org.hamcrest.Matchers.is; public class DataStreamLifecycleDownsampleIT extends ESIntegTestCase { @@ -68,7 +69,15 @@ public void testDownsampling() throws Exception { ) .build(); - DataStreamLifecycleDriver.setupDataStreamAndIngestDocs(client(), dataStreamName, lifecycle, DOC_COUNT); + DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs( + client(), + dataStreamName, + "1986-01-08T23:40:53.384Z", + "2022-01-08T23:40:53.384Z", + lifecycle, + DOC_COUNT, + "1990-09-09T18:00:00" + ); List backingIndices = getBackingIndices(client(), dataStreamName); String firstGenerationBackingIndex = backingIndices.get(0); @@ -85,6 +94,9 @@ public void testDownsampling() throws Exception { witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); } }); + // before we rollover we 
update the index template to remove the start/end time boundaries (they're there just to ease with + // testing so DSL doesn't have to wait for the end_time to lapse) + putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle); client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); @@ -127,7 +139,15 @@ public void testDownsamplingOnlyExecutesTheLastMatchingRound() throws Exception ) ) .build(); - DataStreamLifecycleDriver.setupDataStreamAndIngestDocs(client(), dataStreamName, lifecycle, DOC_COUNT); + DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs( + client(), + dataStreamName, + "1986-01-08T23:40:53.384Z", + "2022-01-08T23:40:53.384Z", + lifecycle, + DOC_COUNT, + "1990-09-09T18:00:00" + ); List backingIndices = getBackingIndices(client(), dataStreamName); String firstGenerationBackingIndex = backingIndices.get(0); @@ -144,7 +164,9 @@ public void testDownsamplingOnlyExecutesTheLastMatchingRound() throws Exception witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); } }); - + // before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with + // testing so DSL doesn't have to wait for the end_time to lapse) + putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle); client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); assertBusy(() -> { @@ -182,7 +204,15 @@ public void testUpdateDownsampleRound() throws Exception { ) .build(); - DataStreamLifecycleDriver.setupDataStreamAndIngestDocs(client(), dataStreamName, lifecycle, DOC_COUNT); + DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs( + client(), + dataStreamName, + "1986-01-08T23:40:53.384Z", + "2022-01-08T23:40:53.384Z", + lifecycle, + DOC_COUNT, + "1990-09-09T18:00:00" + ); List backingIndices = getBackingIndices(client(), dataStreamName); String firstGenerationBackingIndex = backingIndices.get(0); @@ -199,7 +229,9 @@ public void testUpdateDownsampleRound() throws Exception { witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); } }); - + // before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with + // testing so DSL doesn't have to wait for the end_time to lapse) + putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle); client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); assertBusy(() -> { diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java index be71c546a9d4c..d704f3bf93c54 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDriver.java @@ -37,6 +37,8 @@ import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; +import java.time.LocalDateTime; +import java.time.ZoneId; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -62,10 +64,17 @@ public class DataStreamLifecycleDriver { public static final String FIELD_DIMENSION_2 = "dimension_long"; public static final String FIELD_METRIC_COUNTER = "counter"; - public static int setupDataStreamAndIngestDocs(Client client, String 
dataStreamName, DataStreamLifecycle lifecycle, int docCount) - throws IOException { - putTSDBIndexTemplate(client, dataStreamName + "*", lifecycle); - return indexDocuments(client, dataStreamName, docCount); + public static int setupTSDBDataStreamAndIngestDocs( + Client client, + String dataStreamName, + @Nullable String startTime, + @Nullable String endTime, + DataStreamLifecycle lifecycle, + int docCount, + String firstDocTimestamp + ) throws IOException { + putTSDBIndexTemplate(client, dataStreamName + "*", startTime, endTime, lifecycle); + return indexDocuments(client, dataStreamName, docCount, firstDocTimestamp); } public static List getBackingIndices(Client client, String dataStreamName) { @@ -76,10 +85,24 @@ public static List getBackingIndices(Client client, String dataStreamNam return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); } - private static void putTSDBIndexTemplate(Client client, String pattern, DataStreamLifecycle lifecycle) throws IOException { + public static void putTSDBIndexTemplate( + Client client, + String pattern, + @Nullable String startTime, + @Nullable String endTime, + DataStreamLifecycle lifecycle + ) throws IOException { Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); + if (Strings.hasText(startTime)) { + settings.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime); + } + + if (Strings.hasText(endTime)) { + settings.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime); + } + XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties"); mapping.startObject(FIELD_TIMESTAMP).field("type", "date").endObject(); @@ -129,9 +152,10 @@ private static void putComposableIndexTemplate( client.execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } - private static int indexDocuments(Client client, String dataStreamName, int docCount) { + private static int indexDocuments(Client client, String dataStreamName, int docCount, String firstDocTimestamp) { final Supplier sourceSupplier = () -> { - final String ts = randomDateForInterval(new DateHistogramInterval("1s"), System.currentTimeMillis()); + long startTime = LocalDateTime.parse(firstDocTimestamp).atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); + final String ts = randomDateForInterval(new DateHistogramInterval("1s"), startTime); double counterValue = DATE_FORMATTER.parseMillis(ts); final List dimensionValues = new ArrayList<>(5); for (int j = 0; j < randomIntBetween(1, 5); j++) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java index 8311d0f613175..cdddd0a5e5fe0 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java @@ -56,6 +56,10 @@ import org.elasticsearch.xpack.wildcard.Wildcard; import java.io.IOException; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Collection; import 
java.util.Collections; @@ -130,18 +134,15 @@ public void testDownsamplingAuthorized() throws Exception { ) .build(); - setupDataStreamAndIngestDocs(client(), dataStreamName, lifecycle, 10_000); - waitAndAssertDownsamplingCompleted(dataStreamName); - } - - @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") - public void testSystemDataStreamConfigurationWithDownsampling() throws Exception { - String dataStreamName = SystemDataStreamWithDownsamplingConfigurationPlugin.SYSTEM_DATA_STREAM_NAME; - indexDocuments(client(), dataStreamName, 10_000); - waitAndAssertDownsamplingCompleted(dataStreamName); - } - - private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Exception { + setupDataStreamAndIngestDocs( + client(), + dataStreamName, + "1986-01-08T23:40:53.384Z", + "2022-01-08T23:40:53.384Z", + lifecycle, + 10_000, + "1990-09-09T18:00:00" + ); List backingIndices = getDataStreamBackingIndices(dataStreamName); String firstGenerationBackingIndex = backingIndices.get(0).getName(); String firstRoundDownsamplingIndex = "downsample-5m-" + firstGenerationBackingIndex; @@ -158,6 +159,9 @@ private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Ex } }); + // before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with + // testing so DSL doesn't have to wait for the end_time to lapse) + putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle); client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); assertBusy(() -> { @@ -188,6 +192,52 @@ private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Ex }, 30, TimeUnit.SECONDS); } + @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") + public void testSystemDataStreamConfigurationWithDownsampling() throws Exception { + String dataStreamName = SystemDataStreamWithDownsamplingConfigurationPlugin.SYSTEM_DATA_STREAM_NAME; + indexDocuments(client(), dataStreamName, 10_000, Instant.now().toEpochMilli()); + List backingIndices = getDataStreamBackingIndices(dataStreamName); + String firstGenerationBackingIndex = backingIndices.get(0).getName(); + String secondRoundDownsamplingIndex = "downsample-10m-" + firstGenerationBackingIndex; + + Set witnessedDownsamplingIndices = new HashSet<>(); + clusterService().addListener(event -> { + if (event.indicesCreated().contains(secondRoundDownsamplingIndex)) { + witnessedDownsamplingIndices.add(secondRoundDownsamplingIndex); + } + }); + + DataStreamLifecycleService masterDataStreamLifecycleService = internalCluster().getCurrentMasterNodeInstance( + DataStreamLifecycleService.class + ); + try { + // we can't update the index template backing a system data stream, so we run DSL "in the future" + // this means that only one round of downsampling will execute due to an optimisation we have in DSL to execute the last + // matching round + masterDataStreamLifecycleService.setNowSupplier(() -> Instant.now().plus(50, ChronoUnit.DAYS).toEpochMilli()); + client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); + + assertBusy(() -> { + assertNoAuthzErrors(); + assertThat(witnessedDownsamplingIndices.contains(secondRoundDownsamplingIndex), is(true)); + }, 30, TimeUnit.SECONDS); + + assertBusy(() -> { + assertNoAuthzErrors(); + List dsBackingIndices = getDataStreamBackingIndices(dataStreamName); + + assertThat(dsBackingIndices.size(), is(2)); + 
String writeIndex = dsBackingIndices.get(1).getName(); + assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); + // the last downsampling round must remain in the data stream + assertThat(dsBackingIndices.get(0).getName(), is(secondRoundDownsamplingIndex)); + }, 30, TimeUnit.SECONDS); + } finally { + // restore a real nowSupplier so other tests running against this cluster succeed + masterDataStreamLifecycleService.setNowSupplier(() -> Instant.now().toEpochMilli()); + } + } + private Map collectErrorsFromStoreAsMap() { Iterable lifecycleServices = internalCluster().getInstances(DataStreamLifecycleService.class); Map indicesAndErrors = new HashMap<>(); @@ -221,15 +271,36 @@ private void assertNoAuthzErrors() { } } - private void setupDataStreamAndIngestDocs(Client client, String dataStreamName, DataStreamLifecycle lifecycle, int docCount) - throws IOException { - putTSDBIndexTemplate(client, dataStreamName + "*", lifecycle); - indexDocuments(client, dataStreamName, docCount); + private void setupDataStreamAndIngestDocs( + Client client, + String dataStreamName, + @Nullable String startTime, + @Nullable String endTime, + DataStreamLifecycle lifecycle, + int docCount, + String firstDocTimestamp + ) throws IOException { + putTSDBIndexTemplate(client, dataStreamName + "*", startTime, endTime, lifecycle); + long startTimestamp = LocalDateTime.parse(firstDocTimestamp).atZone(ZoneId.of("UTC")).toInstant().toEpochMilli(); + indexDocuments(client, dataStreamName, docCount, startTimestamp); } - private void putTSDBIndexTemplate(Client client, String pattern, DataStreamLifecycle lifecycle) throws IOException { + private void putTSDBIndexTemplate( + Client client, + String pattern, + @Nullable String startTime, + @Nullable String endTime, + DataStreamLifecycle lifecycle + ) throws IOException { Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); + if (Strings.hasText(startTime)) { + settings.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime); + } + + if (Strings.hasText(endTime)) { + settings.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime); + } CompressedXContent mapping = getTSDBMappings(); putComposableIndexTemplate(client, "id1", mapping, List.of(pattern), settings.build(), null, lifecycle); } @@ -275,9 +346,9 @@ private void putComposableIndexTemplate( client.execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); } - private void indexDocuments(Client client, String dataStreamName, int docCount) { + private void indexDocuments(Client client, String dataStreamName, int docCount, long startTime) { final Supplier sourceSupplier = () -> { - final String ts = randomDateForInterval(new DateHistogramInterval("1s"), System.currentTimeMillis()); + final String ts = randomDateForInterval(new DateHistogramInterval("1s"), startTime); double counterValue = DATE_FORMATTER.parseMillis(ts); final List dimensionValues = new ArrayList<>(5); for (int j = 0; j < randomIntBetween(1, 5); j++) { @@ -336,27 +407,26 @@ private void bulkIndex(Client client, String dataStreamName, Supplier getSystemDataStreamDescriptors() { - DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() - .downsampling( - new DataStreamLifecycle.Downsampling( - List.of( - new DataStreamLifecycle.Downsampling.Round( - TimeValue.timeValueMillis(0), - new DownsampleConfig(new DateHistogramInterval("5m")) - ), - new DataStreamLifecycle.Downsampling.Round( - 
TimeValue.timeValueSeconds(10), - new DownsampleConfig(new DateHistogramInterval("10m")) - ) + public static final DataStreamLifecycle LIFECYCLE = DataStreamLifecycle.newBuilder() + .downsampling( + new DataStreamLifecycle.Downsampling( + List.of( + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueMillis(0), + new DownsampleConfig(new DateHistogramInterval("5m")) + ), + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueSeconds(10), + new DownsampleConfig(new DateHistogramInterval("10m")) ) ) ) - .build(); + ) + .build(); + static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; + @Override + public Collection getSystemDataStreamDescriptors() { Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); @@ -368,7 +438,7 @@ public Collection getSystemDataStreamDescriptors() { SystemDataStreamDescriptor.Type.EXTERNAL, new ComposableIndexTemplate( List.of(SYSTEM_DATA_STREAM_NAME), - new Template(settings.build(), getTSDBMappings(), null, lifecycle), + new Template(settings.build(), getTSDBMappings(), null, LIFECYCLE), null, null, null, From 616960c2cf4bf7179499f729d208111493213085 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 11 Oct 2023 09:32:57 +0100 Subject: [PATCH 149/176] Manually update the min CCS version to the version used by 8.11 (#100582) The build automation for 8.11 should have done this, but it didn't work at the time --- server/src/main/java/org/elasticsearch/TransportVersions.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6267fb3b86ae4..5d51a7959b5fa 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -198,7 +198,7 @@ static TransportVersion def(int id) { * Reference to the minimum transport version that can be used with CCS. * This should be the transport version used by the previous minor release. */ - public static final TransportVersion MINIMUM_CCS_VERSION = V_8_500_061; + public static final TransportVersion MINIMUM_CCS_VERSION = ML_PACKAGE_LOADER_PLATFORM_ADDED; static final NavigableMap VERSION_IDS = getAllVersionIds(TransportVersions.class); From 29e3d2829b365e011346271ceaa7d5f2746318fe Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Wed, 11 Oct 2023 11:48:04 +0300 Subject: [PATCH 150/176] ESQL: fix non-null value being returned for unsupported data types in ValueSources (#100656) * Refine (and fix) all cases where an unsupported data type field's values are returned from ValueSources; a short sketch of the resulting contract follows below.
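  (The contract being enforced: a field whose type ES|QL does not support must surface as null everywhere, instead of leaking values from the underlying doc values, and the TopN encoder registered for such fields is only a placeholder that must never see a real value. Below is a minimal, self-contained sketch of that contract; the class and method names are hypothetical stand-ins, not the real ValueSources/TopN APIs.)

    import java.util.List;

    // Hypothetical stand-ins illustrating the "unsupported fields read as null" contract.
    final class UnsupportedFieldContractSketch {

        // Simplified value source: one value per position.
        interface ValueSource {
            Object read(int position);
        }

        // An unsupported field never exposes its underlying values: the fix wires
        // the unsupported source with no delegate, so every position reads as null.
        static ValueSource unsupportedSource() {
            return position -> null;
        }

        // Placeholder TopN encoder: reaching it with a non-null value is a bug.
        static void encodeForTopN(Object value) {
            if (value != null) {
                throw new UnsupportedOperationException("bug: encoding an unsupported data type value for TopN");
            }
            // null values are handled by the dedicated null-encoding path instead
        }

        public static void main(String[] args) {
            ValueSource source = unsupportedSource();
            for (int position : List.of(0, 1, 2)) {
                Object value = source.read(position);
                if (value != null) {
                    throw new AssertionError("unsupported fields must always read as null");
                }
                encodeForTopN(value); // never throws, because every value is null
            }
            System.out.println("unsupported fields always surface as null");
        }
    }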
* Improve unsupported data types handling in TopN --- docs/changelog/100656.yaml | 6 ++ .../compute/lucene/ValueSources.java | 26 +++--- .../topn/DefaultSortableTopNEncoder.java | 2 +- .../compute/operator/topn/TopNEncoder.java | 5 + .../topn/UnsupportedTypesTopNEncoder.java | 45 +++++++++ .../resources/rest-api-spec/test/40_tsdb.yml | 6 ++ .../test/40_unsupported_types.yml | 93 +++++++++++++++++++ .../rest-api-spec/test/50_index_patterns.yml | 4 + .../esql/planner/LocalExecutionPlanner.java | 2 + 9 files changed, 175 insertions(+), 14 deletions(-) create mode 100644 docs/changelog/100656.yaml create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/UnsupportedTypesTopNEncoder.java diff --git a/docs/changelog/100656.yaml b/docs/changelog/100656.yaml new file mode 100644 index 0000000000000..1ee9a2ad0e47a --- /dev/null +++ b/docs/changelog/100656.yaml @@ -0,0 +1,6 @@ +pr: 100656 +summary: "ESQL: fix non-null value being returned for unsupported data types in `ValueSources`" +area: ES|QL +type: bug +issues: + - 100048 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java index e5ce5436990b7..29a539b1e068e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java @@ -69,6 +69,18 @@ public static List sources( sources.add(new ValueSourceInfo(new NullValueSourceType(), new NullValueSource(), elementType, ctx.getIndexReader())); continue; // the field does not exist in this context } + if (asUnsupportedSource) { + sources.add( + new ValueSourceInfo( + new UnsupportedValueSourceType(fieldType.typeName()), + new UnsupportedValueSource(null), + elementType, + ctx.getIndexReader() + ) + ); + HeaderWarning.addWarning("Field [{}] cannot be retrieved, it is unsupported or not indexed; returning null", fieldName); + continue; + } if (fieldType.hasDocValues() == false) { // MatchOnlyTextFieldMapper class lives in the mapper-extras module. 
We use string equality @@ -99,19 +111,7 @@ public static List sources( var fieldContext = new FieldContext(fieldName, fieldData, fieldType); var vsType = fieldData.getValuesSourceType(); var vs = vsType.getField(fieldContext, null); - - if (asUnsupportedSource) { - sources.add( - new ValueSourceInfo( - new UnsupportedValueSourceType(fieldType.typeName()), - new UnsupportedValueSource(vs), - elementType, - ctx.getIndexReader() - ) - ); - } else { - sources.add(new ValueSourceInfo(vsType, vs, elementType, ctx.getIndexReader())); - } + sources.add(new ValueSourceInfo(vsType, vs, elementType, ctx.getIndexReader())); } return sources; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultSortableTopNEncoder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultSortableTopNEncoder.java index 8634d87e2932f..6ccde6b76ce13 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultSortableTopNEncoder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/DefaultSortableTopNEncoder.java @@ -23,7 +23,7 @@ public BytesRef decodeBytesRef(BytesRef bytes, BytesRef scratch) { @Override public String toString() { - return "DefaultUnsortable"; + return "DefaultSortable"; } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java index 2d8f2666ff2f2..f1fb7cb7736c5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/TopNEncoder.java @@ -41,6 +41,11 @@ public interface TopNEncoder { */ VersionTopNEncoder VERSION = new VersionTopNEncoder(); + /** + * Placeholder encoder for unsupported data types. + */ + UnsupportedTypesTopNEncoder UNSUPPORTED = new UnsupportedTypesTopNEncoder(); + void encodeLong(long value, BreakingBytesRefBuilder bytesRefBuilder); long decodeLong(BytesRef bytes); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/UnsupportedTypesTopNEncoder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/UnsupportedTypesTopNEncoder.java new file mode 100644 index 0000000000000..d80d70970409e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/UnsupportedTypesTopNEncoder.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator.topn; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; + +/** + * TopNEncoder for data types that are unsupported. This is just a placeholder class, reaching the encode/decode methods here is a bug. 
+ * + * While this class is needed to build the TopNOperator value and key extractors infrastructure, encoding/decoding is needed + * when actually sorting on a field (which shouldn't be possible for unsupported data types) using key extractors, or when encoding/decoding + * unsupported data types fields values (which should always be "null" by convention) using value extractors. + */ +class UnsupportedTypesTopNEncoder extends SortableTopNEncoder { + @Override + public int encodeBytesRef(BytesRef value, BreakingBytesRefBuilder bytesRefBuilder) { + throw new UnsupportedOperationException("Encountered a bug; trying to encode an unsupported data type value for TopN"); + } + + @Override + public BytesRef decodeBytesRef(BytesRef bytes, BytesRef scratch) { + throw new UnsupportedOperationException("Encountered a bug; trying to decode an unsupported data type value for TopN"); + } + + @Override + public String toString() { + return "UnsupportedTypesTopNEncoder"; + } + + @Override + public TopNEncoder toSortable() { + return this; + } + + @Override + public TopNEncoder toUnsortable() { + return this; + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml index 14ae1ff98d8ad..895a1718b2cbc 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml @@ -106,6 +106,8 @@ setup: --- load everything: - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'from test' @@ -156,6 +158,8 @@ filter on counter: --- from doc with aggregate_metric_double: - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'from test2' @@ -183,6 +187,8 @@ stats on aggregate_metric_double: --- from index pattern unsupported counter: - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'FROM test*' diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml index 44af9559598ab..ad0c7b516fde1 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml @@ -263,3 +263,96 @@ unsupported: - match: { columns.0.name: shape } - match: { columns.0.type: unsupported } - length: { values: 0 } + +--- +unsupported with sort: + - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" + esql.query: + body: + query: 'from test | sort some_doc.bar' + + - match: { columns.0.name: aggregate_metric_double } + - match: { columns.0.type: unsupported } + - match: { columns.1.name: binary } + - match: { columns.1.type: unsupported } + - match: { columns.2.name: completion } + - match: { columns.2.type: unsupported } + - match: { columns.3.name: date_nanos } + - match: { columns.3.type: unsupported } + - match: { columns.4.name: date_range } 
+ - match: { columns.4.type: unsupported } + - match: { columns.5.name: dense_vector } + - match: { columns.5.type: unsupported } + - match: { columns.6.name: double_range } + - match: { columns.6.type: unsupported } + - match: { columns.7.name: float_range } + - match: { columns.7.type: unsupported } + - match: { columns.8.name: geo_point } + - match: { columns.8.type: unsupported } + - match: { columns.9.name: geo_point_alias } + - match: { columns.9.type: unsupported } + - match: { columns.10.name: histogram } + - match: { columns.10.type: unsupported } + - match: { columns.11.name: integer_range } + - match: { columns.11.type: unsupported } + - match: { columns.12.name: ip_range } + - match: { columns.12.type: unsupported } + - match: { columns.13.name: long_range } + - match: { columns.13.type: unsupported } + - match: { columns.14.name: match_only_text } + - match: { columns.14.type: text } + - match: { columns.15.name: name } + - match: { columns.15.type: keyword } + - match: { columns.16.name: rank_feature } + - match: { columns.16.type: unsupported } + - match: { columns.17.name: rank_features } + - match: { columns.17.type: unsupported } + - match: { columns.18.name: search_as_you_type } + - match: { columns.18.type: unsupported } + - match: { columns.19.name: search_as_you_type._2gram } + - match: { columns.19.type: unsupported } + - match: { columns.20.name: search_as_you_type._3gram } + - match: { columns.20.type: unsupported } + - match: { columns.21.name: search_as_you_type._index_prefix } + - match: { columns.21.type: unsupported } + - match: { columns.22.name: shape } + - match: { columns.22.type: unsupported } + - match: { columns.23.name: some_doc.bar } + - match: { columns.23.type: long } + - match: { columns.24.name: some_doc.foo } + - match: { columns.24.type: keyword } + - match: { columns.25.name: text } + - match: { columns.25.type: text } + - match: { columns.26.name: token_count } + - match: { columns.26.type: integer } + + - length: { values: 1 } + - match: { values.0.0: null } + - match: { values.0.1: null } + - match: { values.0.2: null } + - match: { values.0.3: null } + - match: { values.0.4: null } + - match: { values.0.5: null } + - match: { values.0.6: null } + - match: { values.0.7: null } + - match: { values.0.8: null } + - match: { values.0.9: null } + - match: { values.0.10: null } + - match: { values.0.11: null } + - match: { values.0.12: null } + - match: { values.0.13: null } + - match: { values.0.14: "foo bar baz" } + - match: { values.0.15: Alice } + - match: { values.0.16: null } + - match: { values.0.17: null } + - match: { values.0.18: null } + - match: { values.0.19: null } + - match: { values.0.20: null } + - match: { values.0.21: null } + - match: { values.0.22: null } + - match: { values.0.23: 12 } + - match: { values.0.24: xy } + - match: { values.0.25: "foo bar" } + - match: { values.0.26: 3 } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml index 280a32aa10cd3..ff327b2592c88 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/50_index_patterns.yml @@ -267,6 +267,8 @@ disjoint_mappings: --- same_name_different_type: + - skip: + features: allowed_warnings_regex - do: indices.create: index: test1 @@ 
-307,6 +309,8 @@ same_name_different_type: - { "message": 2 } - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'from test1,test2 ' diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 1c26de4a599f5..b86072e1b6da0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -380,6 +380,8 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte case "version" -> TopNEncoder.VERSION; case "boolean", "null", "byte", "short", "integer", "long", "double", "float", "half_float", "datetime", "date_period", "time_duration", "object", "nested", "scaled_float", "unsigned_long", "_doc" -> TopNEncoder.DEFAULT_SORTABLE; + // unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point + case "unsupported" -> TopNEncoder.UNSUPPORTED; default -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); }; } From c4e55ab14c9335c0fff343e94895fa0447e663d9 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 11 Oct 2023 09:54:09 +0100 Subject: [PATCH 151/176] Fix interruption of markAllocationIdAsInSync (#100610) `IndexShard#markAllocationIdAsInSync` is interruptible because it may block the thread on a monitor waiting for the local checkpoint to advance, but we lost the ability to interrupt it on a recovery cancellation in #95270. Closes #96578 Closes #100589 --- docs/changelog/100610.yaml | 7 ++ .../indices/recovery/IndexRecoveryIT.java | 108 ++++++++++++++++++ .../recovery/RecoverySourceHandler.java | 5 +- .../org/elasticsearch/test/ESTestCase.java | 13 ++- 4 files changed, 129 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/100610.yaml diff --git a/docs/changelog/100610.yaml b/docs/changelog/100610.yaml new file mode 100644 index 0000000000000..7423ce9225868 --- /dev/null +++ b/docs/changelog/100610.yaml @@ -0,0 +1,7 @@ +pr: 100610 +summary: Fix interruption of `markAllocationIdAsInSync` +area: Recovery +type: bug +issues: + - 96578 + - 100589 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index d3aed4a3e2bf2..f556486795c2a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -35,11 +35,15 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import 
org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -70,6 +74,8 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.ReplicaShardAllocatorIT; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -85,6 +91,7 @@ import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.GlobalCheckpointListeners; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; @@ -122,7 +129,9 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -132,6 +141,7 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; +import static org.elasticsearch.action.support.ActionTestUtils.assertNoFailureListener; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.node.RecoverySettingsChunkSizePlugin.CHUNK_SIZE_SETTING; @@ -1688,6 +1698,104 @@ public void testWaitForClusterStateToBeAppliedOnSourceNode() throws Exception { } } + public void testDeleteIndexDuringFinalization() throws Exception { + internalCluster().startMasterOnlyNode(); + final var primaryNode = internalCluster().startDataOnlyNode(); + String indexName = "test-index"; + createIndex(indexName, indexSettings(1, 0).build()); + ensureGreen(indexName); + final List indexRequests = IntStream.range(0, between(10, 500)) + .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .toList(); + indexRandom(randomBoolean(), true, true, indexRequests); + assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); + + final var replicaNode = internalCluster().startDataOnlyNode(); + + final SubscribableListener recoveryCompleteListener = new SubscribableListener<>(); + final PlainActionFuture deleteListener = new PlainActionFuture<>(); + + final var threadPool = internalCluster().clusterService().threadPool(); + + final var indexId = internalCluster().clusterService().state().routingTable().index(indexName).getIndex(); + final var primaryIndexShard = internalCluster().getInstance(IndicesService.class, primaryNode) + .indexServiceSafe(indexId) + .getShard(0); + final var globalCheckpointBeforeRecovery = primaryIndexShard.getLastSyncedGlobalCheckpoint(); + + final var replicaNodeTransportService = asInstanceOf( + MockTransportService.class, + 
internalCluster().getInstance(TransportService.class, replicaNode) + ); + replicaNodeTransportService.addRequestHandlingBehavior( + PeerRecoveryTargetService.Actions.TRANSLOG_OPS, + (handler, request, channel, task) -> handler.messageReceived( + request, + new TestTransportChannel(ActionTestUtils.assertNoFailureListener(response -> { + // Process the TRANSLOG_OPS response on the replica (avoiding failing it due to a concurrent delete) but + // before sending the response back send another document to the primary, advancing the GCP to prevent the replica + // being marked as in-sync (NB below we delay the replica write until after the index is deleted) + client().prepareIndex(indexName).setSource("foo", "baz").execute(ActionListener.noop()); + + primaryIndexShard.addGlobalCheckpointListener( + globalCheckpointBeforeRecovery + 1, + new GlobalCheckpointListeners.GlobalCheckpointListener() { + @Override + public Executor executor() { + return EsExecutors.DIRECT_EXECUTOR_SERVICE; + } + + @Override + public void accept(long globalCheckpoint, Exception e) { + assertNull(e); + + // Now the GCP has advanced the replica won't be marked in-sync so respond to the TRANSLOG_OPS request + // to start recovery finalization + try { + channel.sendResponse(response); + } catch (IOException ex) { + fail(ex); + } + + // Wait a short while for finalization to block on advancing the replica's GCP and then delete the index + threadPool.schedule( + () -> client().admin().indices().prepareDelete(indexName).execute(deleteListener), + TimeValue.timeValueMillis(100), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); + } + }, + TimeValue.timeValueSeconds(10) + ); + })), + task + ) + ); + + // delay the delivery of the replica write until the end of the test so the replica never becomes in-sync + replicaNodeTransportService.addRequestHandlingBehavior( + BulkAction.NAME + "[s][r]", + (handler, request, channel, task) -> recoveryCompleteListener.addListener( + assertNoFailureListener(ignored -> handler.messageReceived(request, channel, task)) + ) + ); + + // Create the replica to trigger the whole process + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + // Wait for the index to be deleted + assertTrue(deleteListener.get(20, TimeUnit.SECONDS).isAcknowledged()); + + final var peerRecoverySourceService = internalCluster().getInstance(PeerRecoverySourceService.class, primaryNode); + assertBusy(() -> assertEquals(0, peerRecoverySourceService.numberOfOngoingRecoveries())); + recoveryCompleteListener.onResponse(null); + } + private void assertGlobalCheckpointIsStableAndSyncedInAllNodes(String indexName, List nodes, int shard) throws Exception { assertThat(nodes, is(not(empty()))); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index fc5df1a4aa282..81bc226102f62 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import 
org.elasticsearch.core.Releasable; @@ -426,7 +425,7 @@ public void onFailure(Exception e) { } static void runUnderPrimaryPermit( - CheckedRunnable action, + Runnable action, IndexShard primary, CancellableThreads cancellableThreads, ActionListener listener @@ -1260,7 +1259,7 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis */ final SubscribableListener markInSyncStep = new SubscribableListener<>(); runUnderPrimaryPermit( - () -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint), + () -> cancellableThreads.execute(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint)), shard, cancellableThreads, markInSyncStep diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 540ef4cf1027b..9ccfbd2e25ca6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -114,6 +114,7 @@ import org.elasticsearch.xcontent.XContentParser.Token; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -2036,7 +2037,17 @@ protected static boolean isTurkishLocale() { || Locale.getDefault().getLanguage().equals(new Locale("az").getLanguage()); } - public static void fail(Throwable t, String msg, Object... args) { + public static T fail(Throwable t, String msg, Object... args) { throw new AssertionError(org.elasticsearch.common.Strings.format(msg, args), t); } + + public static T fail(Throwable t) { + return fail(t, "unexpected"); + } + + @SuppressWarnings("unchecked") + public static T asInstanceOf(Class clazz, Object o) { + assertThat(o, Matchers.instanceOf(clazz)); + return (T) o; + } } From 5dc7cccf1368061d27689f0335d3cfa17ab1f57c Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Wed, 11 Oct 2023 12:00:52 +0300 Subject: [PATCH 152/176] Fix remaining logs tests (#100407) In this PR we convert the remaining 3 YAML logs tests to Java REST tests. This should remove all the flaky tests from the data stream YAML suite, which is why we are re-enabling it.
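The conversion pattern is mechanical: each YAML do: step becomes a Request issued through the low-level REST client, and each match: assertion becomes a Hamcrest assertion on the parsed response body. A minimal sketch of one converted test, assuming it lives in an ESRestTestCase subclass like the new tests below; the test name and the indexed document are hypothetical:

    // Assumed imports: org.elasticsearch.client.Request, java.util.Map,
    // and static org.hamcrest.Matchers.is; client(), assertOK() and
    // entityAsMap() come from the ESRestTestCase base class.
    public void testConvertedFromYaml() throws Exception {
        // YAML "do: index: ..." becomes an explicit request, with a refresh so
        // the document is immediately visible to the search below
        Request index = new Request("POST", "/logs-generic-default/_doc?refresh=true");
        index.setJsonEntity("""
            { "@timestamp": "2023-10-11T00:00:00Z", "message": "hello world" }
            """);
        assertOK(client().performRequest(index));

        // YAML "do: search: ..." plus "match: { hits.total.value: 1 }"
        Request search = new Request("GET", "/logs-generic-default/_search");
        Map<String, Object> response = entityAsMap(client().performRequest(search));
        @SuppressWarnings("unchecked")
        Map<String, Object> total = (Map<String, Object>) ((Map<String, Object>) response.get("hits")).get("total");
        assertThat(total.get("value"), is(1));
    }

Unlike the YAML runner, failures here come with real stack traces and the tests can reach into cluster internals, which should make the formerly flaky assertions much easier to debug.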
Closes: https://github.com/elastic/elasticsearch/issues/99911 Closes: https://github.com/elastic/elasticsearch/issues/97795 --- .../datastreams/EcsLogsDataStreamIT.java | 433 ++++++++++++++++++ .../datastreams/LogsDataStreamIT.java | 408 ++++++++++++++++- .../resources/ecs-logs/es-agent-ecs-log.json | 118 +++++ .../DataStreamsClientYamlTestSuiteIT.java | 2 - .../data_stream/230_logs_message_pipeline.yml | 114 ----- .../data_stream/240_logs_ecs_mappings.yml | 406 ---------------- .../data_stream/250_logs_no_subobjects.yml | 218 --------- .../test/data_stream/lifecycle/20_basic.yml | 3 + 8 files changed, 946 insertions(+), 756 deletions(-) create mode 100644 modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java create mode 100644 modules/data-streams/src/javaRestTest/resources/ecs-logs/es-agent-ecs-log.json delete mode 100644 modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/230_logs_message_pipeline.yml delete mode 100644 modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml delete mode 100644 modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/250_logs_no_subobjects.yml diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java new file mode 100644 index 0000000000000..7de4ed2f2843c --- /dev/null +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/EcsLogsDataStreamIT.java @@ -0,0 +1,433 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.core.PathUtils; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.datastreams.LogsDataStreamIT.createDataStream; +import static org.elasticsearch.datastreams.LogsDataStreamIT.getMappingProperties; +import static org.elasticsearch.datastreams.LogsDataStreamIT.getValueFromPath; +import static org.elasticsearch.datastreams.LogsDataStreamIT.getWriteBackingIndex; +import static org.elasticsearch.datastreams.LogsDataStreamIT.indexDoc; +import static org.elasticsearch.datastreams.LogsDataStreamIT.searchDocs; +import static org.elasticsearch.datastreams.LogsDataStreamIT.waitForLogs; +import static org.hamcrest.Matchers.is; + +public class EcsLogsDataStreamIT extends DisabledSecurityDataStreamTestCase { + + private static final String DATA_STREAM_NAME = "logs-generic-default"; + private RestClient client; + private String backingIndex; + + @Before + public void setup() throws Exception { + client = client(); + waitForLogs(client); + + { + Request request = new Request("PUT", "/_ingest/pipeline/logs@custom"); + request.setJsonEntity(""" + { + "processors": [ + { + "pipeline" : { + "name": "logs@json-message", + "description": "A pipeline that automatically parses JSON log events into top-level fields if they are such" + } + } + ] + } + """); + assertOK(client.performRequest(request)); + } + createDataStream(client, DATA_STREAM_NAME); + backingIndex = getWriteBackingIndex(client, DATA_STREAM_NAME); + } + + @After + public void cleanUp() throws IOException { + adminClient().performRequest(new Request("DELETE", "_data_stream/*")); + } + + @SuppressWarnings("unchecked") + public void testElasticAgentLogEcsMappings() throws Exception { + { + Path path = PathUtils.get(Thread.currentThread().getContextClassLoader().getResource("ecs-logs/es-agent-ecs-log.json").toURI()); + String agentLog = Files.readString(path); + indexDoc(client, DATA_STREAM_NAME, agentLog); + List results = searchDocs(client, DATA_STREAM_NAME, """ + { + "query": { + "term": { + "test": { + "value": "elastic-agent-log" + } + } + }, + "fields": ["message"] + } + """); + assertThat(results.size(), is(1)); + Map source = ((Map>) results.get(0)).get("_source"); + Map fields = ((Map>) results.get(0)).get("fields"); + + // timestamp from deserialized JSON message field should win + assertThat(source.get("@timestamp"), is("2023-05-16T13:49:40.374Z")); + assertThat( + ((Map>) source.get("kubernetes")).get("pod").get("name"), + is("elastic-agent-managed-daemonset-jwktj") + ); + // expecting the extracted message from within the original JSON-formatted message + assertThat(((List) fields.get("message")).get(0), is("Non-zero metrics in the last 30s")); + + Map properties = getMappingProperties(client, backingIndex); + assertThat(getValueFromPath(properties, List.of("@timestamp", "type")), is("date")); + assertThat(getValueFromPath(properties, List.of("message", "type")), is("match_only_text")); + assertThat( + getValueFromPath(properties, List.of("kubernetes", "properties", "pod", "properties", "name", "type")), + is("keyword") + ); + assertThat(getValueFromPath(properties, List.of("kubernetes", "properties", "pod", "properties", "ip", "type")), is("ip")); + assertThat(getValueFromPath(properties, List.of("kubernetes", 
"properties", "pod", "properties", "test_ip", "type")), is("ip")); + assertThat( + getValueFromPath( + properties, + List.of("kubernetes", "properties", "labels", "properties", "pod-template-generation", "type") + ), + is("keyword") + ); + assertThat(getValueFromPath(properties, List.of("log", "properties", "file", "properties", "path", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("log", "properties", "file", "properties", "path", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("host", "properties", "os", "properties", "name", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("host", "properties", "os", "properties", "name", "fields", "text", "type")), + is("match_only_text") + ); + } + } + + @SuppressWarnings("unchecked") + public void testGeneralMockupEcsMappings() throws Exception { + { + indexDoc(client, DATA_STREAM_NAME, """ + { + "start_timestamp": "not a date", + "start-timestamp": "not a date", + "timestamp.us": 1688550340718000, + "test": "mockup-ecs-log", + "registry": { + "data": { + "strings": ["C:\\\\rta\\\\red_ttp\\\\bin\\\\myapp.exe"] + } + }, + "process": { + "title": "ssh", + "executable": "/usr/bin/ssh", + "name": "ssh", + "command_line": "/usr/bin/ssh -l user 10.0.0.16", + "working_directory": "/home/ekoren", + "io": { + "text": "test" + } + }, + "url": { + "path": "/page", + "full": "https://mydomain.com/app/page", + "original": "https://mydomain.com/app/original" + }, + "email": { + "message_id": "81ce15$8r2j59@mail01.example.com" + }, + "parent": { + "url": { + "path": "/page", + "full": "https://mydomain.com/app/page", + "original": "https://mydomain.com/app/original" + }, + "body": { + "content": "Some content" + }, + "file": { + "path": "/path/to/my/file", + "target_path": "/path/to/my/file" + }, + "code_signature.timestamp": "2023-07-05", + "registry.data.strings": ["C:\\\\rta\\\\red_ttp\\\\bin\\\\myapp.exe"] + }, + "error": { + "stack_trace": "co.elastic.test.TestClass error:\\n at co.elastic.test.BaseTestClass", + "message": "Error occurred" + }, + "file": { + "path": "/path/to/my/file", + "target_path": "/path/to/my/file" + }, + "os": { + "full": "Mac OS Mojave" + }, + "user_agent": { + "original": "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15" + }, + "user": { + "full_name": "John Doe" + }, + "vulnerability": { + "score": { + "base": 5.5, + "temporal": 5.5, + "version": "2.0" + }, + "textual_score": "bad" + }, + "host": { + "cpu": { + "usage": 0.68 + } + }, + "geo": { + "location": { + "lon": -73.614830, + "lat": 45.505918 + } + }, + "data_stream": { + "dataset": "nginx.access", + "namespace": "production", + "custom": "whatever" + }, + "structured_data": { + "key1": "value1", + "key2": ["value2", "value3"] + }, + "exports": { + "key": "value" + }, + "top_level_imports": { + "key": "value" + }, + "nested": { + "imports": { + "key": "value" + } + }, + "numeric_as_string": "42", + "socket": { + "ip": "127.0.0.1", + "remote_ip": "187.8.8.8" + } + } + """); + List results = searchDocs(client, DATA_STREAM_NAME, """ + { + "query": { + "term": { + "test": { + "value": "mockup-ecs-log" + } + } + }, + "fields": ["start-timestamp", "start_timestamp"], + "script_fields": { + "data_stream_type": { + "script": { + "source": "doc['data_stream.type'].value" + } + } + } + } + """); + assertThat(results.size(), is(1)); + Map fields = ((Map>) results.get(0)).get("fields"); + List ignored = ((Map>) 
results.get(0)).get("_ignored"); + Map ignoredFieldValues = ((Map>) results.get(0)).get("ignored_field_values"); + + // the ECS date dynamic template enforces mapping of "*_timestamp" fields to a date type + assertThat(ignored.size(), is(2)); + assertThat(ignored.get(0), is("start_timestamp")); + List startTimestampValues = (List) ignoredFieldValues.get("start_timestamp"); + assertThat(startTimestampValues.size(), is(1)); + assertThat(startTimestampValues.get(0), is("not a date")); + // "start-timestamp" doesn't match the ECS dynamic mapping pattern "*_timestamp" + assertThat(fields.get("start-timestamp"), is(List.of("not a date"))); + // verify that data_stream.type has the correct constant_keyword value + assertThat(fields.get("data_stream_type"), is(List.of("logs"))); + assertThat(ignored.get(1), is("vulnerability.textual_score")); + + Map properties = getMappingProperties(client, backingIndex); + assertThat(getValueFromPath(properties, List.of("error", "properties", "message", "type")), is("match_only_text")); + assertThat( + getValueFromPath(properties, List.of("registry", "properties", "data", "properties", "strings", "type")), + is("wildcard") + ); + assertThat( + getValueFromPath( + properties, + List.of("parent", "properties", "registry", "properties", "data", "properties", "strings", "type") + ), + is("wildcard") + ); + assertThat(getValueFromPath(properties, List.of("process", "properties", "io", "properties", "text", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("email", "properties", "message_id", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("url", "properties", "path", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("parent", "properties", "url", "properties", "path", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("url", "properties", "full", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("url", "properties", "full", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("parent", "properties", "url", "properties", "full", "type")), is("wildcard")); + assertThat( + getValueFromPath(properties, List.of("parent", "properties", "url", "properties", "full", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("url", "properties", "original", "type")), is("wildcard")); + assertThat( + getValueFromPath(properties, List.of("url", "properties", "original", "fields", "text", "type")), + is("match_only_text") + ); + assertThat( + getValueFromPath(properties, List.of("parent", "properties", "url", "properties", "original", "type")), + is("wildcard") + ); + assertThat( + getValueFromPath(properties, List.of("parent", "properties", "url", "properties", "original", "fields", "text", "type")), + is("match_only_text") + ); + assertThat( + getValueFromPath(properties, List.of("parent", "properties", "body", "properties", "content", "type")), + is("wildcard") + ); + assertThat( + getValueFromPath(properties, List.of("parent", "properties", "body", "properties", "content", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("process", "properties", "command_line", "type")), is("wildcard")); + assertThat( + getValueFromPath(properties, List.of("process", "properties", "command_line", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, 
List.of("error", "properties", "stack_trace", "type")), is("wildcard")); + assertThat( + getValueFromPath(properties, List.of("error", "properties", "stack_trace", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("file", "properties", "path", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("file", "properties", "path", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("parent", "properties", "file", "properties", "path", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("parent", "properties", "file", "properties", "path", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("file", "properties", "target_path", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("file", "properties", "target_path", "fields", "text", "type")), + is("match_only_text") + ); + assertThat( + getValueFromPath(properties, List.of("parent", "properties", "file", "properties", "target_path", "type")), + is("keyword") + ); + assertThat( + getValueFromPath( + properties, + List.of("parent", "properties", "file", "properties", "target_path", "fields", "text", "type") + ), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("os", "properties", "full", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("os", "properties", "full", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("user_agent", "properties", "original", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("user_agent", "properties", "original", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("process", "properties", "title", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("process", "properties", "title", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("process", "properties", "executable", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("process", "properties", "executable", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("process", "properties", "name", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("process", "properties", "name", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("process", "properties", "working_directory", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("process", "properties", "working_directory", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("user", "properties", "full_name", "type")), is("keyword")); + assertThat( + getValueFromPath(properties, List.of("user", "properties", "full_name", "fields", "text", "type")), + is("match_only_text") + ); + assertThat(getValueFromPath(properties, List.of("start_timestamp", "type")), is("date")); + // testing the default mapping of string input fields to keyword if not matching any pattern + assertThat(getValueFromPath(properties, List.of("start-timestamp", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("timestamp", "properties", "us", "type")), is("long")); + assertThat( + 
getValueFromPath(properties, List.of("parent", "properties", "code_signature", "properties", "timestamp", "type")), + is("date") + ); + assertThat( + getValueFromPath(properties, List.of("vulnerability", "properties", "score", "properties", "base", "type")), + is("float") + ); + assertThat( + getValueFromPath(properties, List.of("vulnerability", "properties", "score", "properties", "temporal", "type")), + is("float") + ); + assertThat( + getValueFromPath(properties, List.of("vulnerability", "properties", "score", "properties", "version", "type")), + is("keyword") + ); + assertThat(getValueFromPath(properties, List.of("vulnerability", "properties", "textual_score", "type")), is("float")); + assertThat( + getValueFromPath(properties, List.of("host", "properties", "cpu", "properties", "usage", "type")), + is("scaled_float") + ); + assertThat( + getValueFromPath(properties, List.of("host", "properties", "cpu", "properties", "usage", "scaling_factor")), + is(1000.0) + ); + assertThat(getValueFromPath(properties, List.of("geo", "properties", "location", "type")), is("geo_point")); + assertThat(getValueFromPath(properties, List.of("data_stream", "properties", "dataset", "type")), is("constant_keyword")); + assertThat(getValueFromPath(properties, List.of("data_stream", "properties", "namespace", "type")), is("constant_keyword")); + assertThat(getValueFromPath(properties, List.of("data_stream", "properties", "type", "type")), is("constant_keyword")); + // not one of the three data_stream fields that are explicitly mapped to constant_keyword + assertThat(getValueFromPath(properties, List.of("data_stream", "properties", "custom", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("structured_data", "type")), is("flattened")); + assertThat(getValueFromPath(properties, List.of("exports", "type")), is("flattened")); + assertThat(getValueFromPath(properties, List.of("top_level_imports", "type")), is("flattened")); + assertThat(getValueFromPath(properties, List.of("nested", "properties", "imports", "type")), is("flattened")); + // verifying the default mapping for strings into keyword, overriding the automatic numeric string detection + assertThat(getValueFromPath(properties, List.of("numeric_as_string", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("socket", "properties", "ip", "type")), is("ip")); + assertThat(getValueFromPath(properties, List.of("socket", "properties", "remote_ip", "type")), is("ip")); + } + } +} diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index 5bb9c8b340ee9..cc8695b9e0e5b 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -21,6 +21,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.matchesRegex; +import static org.hamcrest.Matchers.nullValue; public class LogsDataStreamIT extends DisabledSecurityDataStreamTestCase { @@ -45,8 +46,8 @@ public void testDefaultLogsSettingAndMapping() throws Exception { // Extend the mapping and verify putMapping(client, backingIndex); Map mappingProperties = getMappingProperties(client, backingIndex); - assertThat(((Map) mappingProperties.get("@timestamp")).get("ignore_malformed"), equalTo(false)); - 
assertThat(((Map) mappingProperties.get("numeric_field")).get("type"), equalTo("integer")); + assertThat(getValueFromPath(mappingProperties, List.of("@timestamp", "ignore_malformed")), equalTo(false)); + assertThat(getValueFromPath(mappingProperties, List.of("numeric_field", "type")), equalTo("integer")); // Insert valid doc and verify successful indexing { @@ -149,11 +150,8 @@ public void testCustomMapping() throws Exception { // Verify that the new field from the custom component template is applied putMapping(client, backingIndex); Map mappingProperties = getMappingProperties(client, backingIndex); - assertThat(((Map) mappingProperties.get("numeric_field")).get("type"), equalTo("integer")); - assertThat( - ((Map) mappingProperties.get("socket")).get("properties"), - equalTo(Map.of("ip", Map.of("type", "keyword"))) - ); + assertThat(getValueFromPath(mappingProperties, List.of("numeric_field", "type")), equalTo("integer")); + assertThat(getValueFromPath(mappingProperties, List.of("socket", "properties", "ip", "type")), is("keyword")); // Insert valid doc and verify successful indexing { @@ -227,7 +225,7 @@ public void testLogsDefaultPipeline() throws Exception { // Verify mapping from custom logs Map mappingProperties = getMappingProperties(client, backingIndex); - assertThat(((Map) mappingProperties.get("@timestamp")).get("type"), equalTo("date")); + assertThat(getValueFromPath(mappingProperties, List.of("@timestamp", "type")), equalTo("date")); // no timestamp - testing default pipeline's @timestamp set processor { @@ -284,7 +282,358 @@ public void testLogsDefaultPipeline() throws Exception { } } - private static void waitForLogs(RestClient client) throws Exception { + @SuppressWarnings("unchecked") + public void testLogsMessagePipeline() throws Exception { + RestClient client = client(); + waitForLogs(client); + + { + Request request = new Request("PUT", "/_ingest/pipeline/logs@custom"); + request.setJsonEntity(""" + { + "processors": [ + { + "pipeline" : { + "name": "logs@json-message", + "description": "A pipeline that automatically parses JSON log events into top-level fields if they are such" + } + } + ] + } + """); + assertOK(client.performRequest(request)); + } + + String dataStreamName = "logs-generic-default"; + createDataStream(client, dataStreamName); + + { + indexDoc(client, dataStreamName, """ + { + "@timestamp":"2023-05-09T16:48:34.135Z", + "message":"json", + "log.level": "INFO", + "ecs.version": "1.6.0", + "service.name":"my-app", + "event.dataset":"my-app.RollingFile", + "process.thread.name":"main", + "log.logger":"root.pkg.MyApp" + } + """); + List results = searchDocs(client, dataStreamName, """ + { + "query": { + "term": { + "message": { + "value": "json" + } + } + }, + "fields": ["message"] + } + """); + assertThat(results.size(), is(1)); + Map source = ((Map>) results.get(0)).get("_source"); + Map fields = ((Map>) results.get(0)).get("fields"); + + // root field parsed from JSON should win + assertThat(source.get("@timestamp"), is("2023-05-09T16:48:34.135Z")); + assertThat(source.get("message"), is("json")); + assertThat(((List) fields.get("message")).get(0), is("json")); + + // successful access to subfields verifies that dot expansion is part of the pipeline + assertThat(source.get("log.level"), is("INFO")); + assertThat(source.get("ecs.version"), is("1.6.0")); + assertThat(source.get("service.name"), is("my-app")); + assertThat(source.get("event.dataset"), is("my-app.RollingFile")); + assertThat(source.get("process.thread.name"), is("main")); + 
assertThat(source.get("log.logger"), is("root.pkg.MyApp")); + // _tmp_json_message should be removed by the pipeline + assertThat(source.get("_tmp_json_message"), is(nullValue())); + } + + // test malformed-JSON parsing - parsing error should be ignored and the document should be indexed with original message + { + indexDoc(client, dataStreamName, """ + { + "@timestamp":"2023-05-10", + "test":"malformed_json", + "message": "{\\"@timestamp\\":\\"2023-05-09T16:48:34.135Z\\", \\"message\\":\\"malformed_json\\"}}" + } + """); + List results = searchDocs(client, dataStreamName, """ + { + "query": { + "term": { + "test": { + "value": "malformed_json" + } + } + } + } + """); + assertThat(results.size(), is(1)); + Map source = ((Map>) results.get(0)).get("_source"); + + // root field parsed from JSON should win + assertThat(source.get("@timestamp"), is("2023-05-10")); + assertThat(source.get("message"), is("{\"@timestamp\":\"2023-05-09T16:48:34.135Z\", \"message\":\"malformed_json\"}}")); + assertThat(source.get("_tmp_json_message"), is(nullValue())); + } + + // test non-string message field + { + indexDoc(client, dataStreamName, """ + { + "message": 42, + "test": "numeric_message" + } + """); + List results = searchDocs(client, dataStreamName, """ + { + "query": { + "term": { + "test": { + "value": "numeric_message" + } + } + }, + "fields": ["message"] + } + """); + assertThat(results.size(), is(1)); + Map source = ((Map>) results.get(0)).get("_source"); + Map fields = ((Map>) results.get(0)).get("fields"); + + assertThat(source.get("message"), is(42)); + assertThat(((List) fields.get("message")).get(0), is("42")); + } + } + + @SuppressWarnings("unchecked") + public void testNoSubobjects() throws Exception { + RestClient client = client(); + waitForLogs(client); + { + Request request = new Request("POST", "/_component_template/logs-test-subobjects-mappings"); + request.setJsonEntity(""" + { + "template": { + "settings": { + "mapping": { + "ignore_malformed": true + } + }, + "mappings": { + "subobjects": false, + "date_detection": false, + "properties": { + "data_stream.type": { + "type": "constant_keyword", + "value": "logs" + }, + "data_stream.dataset": { + "type": "constant_keyword" + }, + "data_stream.namespace": { + "type": "constant_keyword" + } + } + } + } + } + """); + assertOK(client.performRequest(request)); + } + { + Request request = new Request("POST", "/_index_template/logs-ecs-test-template"); + request.setJsonEntity(""" + { + "priority": 200, + "data_stream": {}, + "index_patterns": ["logs-*-*"], + "composed_of": ["logs-test-subobjects-mappings", "ecs@dynamic_templates"] + } + """); + assertOK(client.performRequest(request)); + } + String dataStream = "logs-ecs-test-subobjects"; + createDataStream(client, dataStream); + String backingIndexName = getWriteBackingIndex(client, dataStream); + + indexDoc(client, dataStream, """ + { + "@timestamp": "2023-06-12", + "start_timestamp": "2023-06-08", + "location" : "POINT (-71.34 41.12)", + "test": "flattened", + "test.start_timestamp": "not a date", + "test.start-timestamp": "not a date", + "registry.data.strings": ["C:\\\\rta\\\\red_ttp\\\\bin\\\\myapp.exe"], + "process.title": "ssh", + "process.executable": "/usr/bin/ssh", + "process.name": "ssh", + "process.command_line": "/usr/bin/ssh -l user 10.0.0.16", + "process.working_directory": "/home/ekoren", + "process.io.text": "test", + "url.path": "/page", + "url.full": "https://mydomain.com/app/page", + "url.original": "https://mydomain.com/app/original", + "email.message_id": 
"81ce15$8r2j59@mail01.example.com", + "parent.url.path": "/page", + "parent.url.full": "https://mydomain.com/app/page", + "parent.url.original": "https://mydomain.com/app/original", + "parent.body.content": "Some content", + "parent.file.path": "/path/to/my/file", + "parent.file.target_path": "/path/to/my/file", + "parent.registry.data.strings": ["C:\\\\rta\\\\red_ttp\\\\bin\\\\myapp.exe"], + "error.stack_trace": "co.elastic.test.TestClass error:\\n at co.elastic.test.BaseTestClass", + "error.message": "Error occurred", + "file.path": "/path/to/my/file", + "file.target_path": "/path/to/my/file", + "os.full": "Mac OS Mojave", + "user_agent.original": "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15", + "user.full_name": "John Doe", + "vulnerability.score.base": 5.5, + "vulnerability.score.temporal": 5.5, + "vulnerability.score.version": "2.0", + "vulnerability.textual_score": "bad", + "host.cpu.usage": 0.68, + "geo.location": [-73.614830, 45.505918], + "data_stream.dataset": "nginx.access", + "data_stream.namespace": "production", + "data_stream.custom": "whatever", + "structured_data": {"key1": "value1", "key2": ["value2", "value3"]}, + "exports": {"key": "value"}, + "top_level_imports": {"key": "value"}, + "nested.imports": {"key": "value"}, + "numeric_as_string": "42", + "socket.ip": "127.0.0.1", + "socket.remote_ip": "187.8.8.8" + } + """); + List hits = searchDocs(client, dataStream, """ + { + "query": { + "term": { + "test": { + "value": "flattened" + } + } + }, + "fields": [ + "data_stream.type", + "location", + "geo.location", + "test.start-timestamp", + "test.start_timestamp", + "vulnerability.textual_score" + ] + } + """); + assertThat(hits.size(), is(1)); + Map fields = ((Map>) hits.get(0)).get("fields"); + List ignored = ((Map>) hits.get(0)).get("_ignored"); + Map> ignoredFieldValues = ((Map>>) hits.get(0)).get("ignored_field_values"); + + // verify that data_stream.type has the correct constant_keyword value + assertThat(fields.get("data_stream.type"), is(List.of("logs"))); + // verify geo_point subfields evaluation + assertThat(((List>) fields.get("location")).get(0).get("type"), is("Point")); + List coordinates = ((List>>) fields.get("location")).get(0).get("coordinates"); + assertThat(coordinates.size(), is(2)); + assertThat(coordinates.get(0), equalTo(-71.34)); + assertThat(coordinates.get(1), equalTo(41.12)); + List geoLocation = (List) fields.get("geo.location"); + assertThat(((Map) geoLocation.get(0)).get("type"), is("Point")); + coordinates = ((Map>) geoLocation.get(0)).get("coordinates"); + assertThat(coordinates.size(), is(2)); + assertThat(coordinates.get(0), equalTo(-73.614830)); + assertThat(coordinates.get(1), equalTo(45.505918)); + // "start-timestamp" doesn't match the ECS dynamic mapping pattern "*_timestamp" + assertThat(fields.get("test.start-timestamp"), is(List.of("not a date"))); + assertThat(ignored.size(), is(2)); + assertThat(ignored.get(0), is("vulnerability.textual_score")); + // the ECS date dynamic template enforces mapping of "*_timestamp" fields to a date type + assertThat(ignored.get(1), is("test.start_timestamp")); + assertThat(ignoredFieldValues.get("test.start_timestamp").size(), is(1)); + assertThat(ignoredFieldValues.get("test.start_timestamp"), is(List.of("not a date"))); + assertThat(ignoredFieldValues.get("vulnerability.textual_score").size(), is(1)); + assertThat(ignoredFieldValues.get("vulnerability.textual_score").get(0), is("bad")); + + Map properties = getMappingProperties(client, backingIndexName); + 
assertThat(getValueFromPath(properties, List.of("error.message", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("registry.data.strings", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("parent.registry.data.strings", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("process.io.text", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("email.message_id", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("url.path", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("parent.url.path", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("url.full", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("url.full", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("parent.url.full", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("parent.url.full", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("url.original", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("url.original", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("parent.url.original", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("parent.url.original", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("parent.body.content", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("parent.body.content", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("process.command_line", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("process.command_line", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("error.stack_trace", "type")), is("wildcard")); + assertThat(getValueFromPath(properties, List.of("error.stack_trace", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("file.path", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("file.path", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("parent.file.path", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("parent.file.path", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("file.target_path", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("file.target_path", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("parent.file.target_path", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("parent.file.target_path", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("os.full", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("os.full", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("user_agent.original", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("user_agent.original", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, 
List.of("process.title", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("process.title", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("process.executable", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("process.executable", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("process.name", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("process.name", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("process.working_directory", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("process.working_directory", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("user.full_name", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("user.full_name", "fields", "text", "type")), is("match_only_text")); + assertThat(getValueFromPath(properties, List.of("start_timestamp", "type")), is("date")); + assertThat(getValueFromPath(properties, List.of("test.start_timestamp", "type")), is("date")); + // testing the default mapping of string input fields to keyword if not matching any pattern + assertThat(getValueFromPath(properties, List.of("test.start-timestamp", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("vulnerability.score.base", "type")), is("float")); + assertThat(getValueFromPath(properties, List.of("vulnerability.score.temporal", "type")), is("float")); + assertThat(getValueFromPath(properties, List.of("vulnerability.score.version", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("vulnerability.textual_score", "type")), is("float")); + assertThat(getValueFromPath(properties, List.of("host.cpu.usage", "type")), is("scaled_float")); + assertThat(getValueFromPath(properties, List.of("host.cpu.usage", "scaling_factor")), is(1000.0)); + assertThat(getValueFromPath(properties, List.of("location", "type")), is("geo_point")); + assertThat(getValueFromPath(properties, List.of("geo.location", "type")), is("geo_point")); + assertThat(getValueFromPath(properties, List.of("data_stream.dataset", "type")), is("constant_keyword")); + assertThat(getValueFromPath(properties, List.of("data_stream.namespace", "type")), is("constant_keyword")); + assertThat(getValueFromPath(properties, List.of("data_stream.type", "type")), is("constant_keyword")); + // not one of the three data_stream fields that are explicitly mapped to constant_keyword + assertThat(getValueFromPath(properties, List.of("data_stream.custom", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("structured_data", "type")), is("flattened")); + assertThat(getValueFromPath(properties, List.of("exports", "type")), is("flattened")); + assertThat(getValueFromPath(properties, List.of("top_level_imports", "type")), is("flattened")); + assertThat(getValueFromPath(properties, List.of("nested.imports", "type")), is("flattened")); + // verifying the default mapping for strings into keyword, overriding the automatic numeric string detection + assertThat(getValueFromPath(properties, List.of("numeric_as_string", "type")), is("keyword")); + assertThat(getValueFromPath(properties, List.of("socket.ip", "type")), is("ip")); + assertThat(getValueFromPath(properties, List.of("socket.remote_ip", "type")), is("ip")); + + } + + static void 
waitForLogs(RestClient client) throws Exception {
assertBusy(() -> {
try {
Request request = new Request("GET", "_index_template/logs");
@@ -295,13 +644,13 @@ private static void waitForLogs(RestClient client) throws Exception {
});
}
- private static void createDataStream(RestClient client, String name) throws IOException {
+ static void createDataStream(RestClient client, String name) throws IOException {
Request request = new Request("PUT", "_data_stream/" + name);
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
- private static String getWriteBackingIndex(RestClient client, String name) throws IOException {
+ static String getWriteBackingIndex(RestClient client, String name) throws IOException {
Request request = new Request("GET", "_data_stream/" + name);
List<Object> dataStreams = (List<Object>) entityAsMap(client.performRequest(request)).get("data_streams");
Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
@@ -310,12 +659,12 @@ private static String getWriteBackingIndex(RestClient client, String name) throw
}
@SuppressWarnings("unchecked")
- private static Map<String, Object> getSettings(RestClient client, String indexName) throws IOException {
+ static Map<String, Object> getSettings(RestClient client, String indexName) throws IOException {
Request request = new Request("GET", "/" + indexName + "/_settings?flat_settings");
return ((Map<String, Map<String, Object>>) entityAsMap(client.performRequest(request)).get(indexName)).get("settings");
}
- private static void putMapping(RestClient client, String indexName) throws IOException {
+ static void putMapping(RestClient client, String indexName) throws IOException {
Request request = new Request("PUT", "/" + indexName + "/_mapping");
request.setJsonEntity("""
{
@@ -330,24 +679,51 @@ private static void putMapping(RestClient client, String indexName) throws IOExc
}
@SuppressWarnings("unchecked")
- private static Map<String, Object> getMappingProperties(RestClient client, String indexName) throws IOException {
+ static Map<String, Object> getMappingProperties(RestClient client, String indexName) throws IOException {
Request request = new Request("GET", "/" + indexName + "/_mapping");
Map<String, Object> map = (Map<String, Object>) entityAsMap(client.performRequest(request)).get(indexName);
Map<String, Object> mappings = (Map<String, Object>) map.get("mappings");
return (Map<String, Object>) mappings.get("properties");
}
- private static void indexDoc(RestClient client, String dataStreamName, String doc) throws IOException {
+ static void indexDoc(RestClient client, String dataStreamName, String doc) throws IOException {
Request request = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true");
request.setJsonEntity(doc);
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
- private static List<Object> searchDocs(RestClient client, String dataStreamName, String query) throws IOException {
+ static List<Object> searchDocs(RestClient client, String dataStreamName, String query) throws IOException {
Request request = new Request("GET", "/" + dataStreamName + "/_search");
request.setJsonEntity(query);
Map<String, Object> hits = (Map<String, Object>) entityAsMap(client.performRequest(request)).get("hits");
return (List<Object>) hits.get("hits");
}
+
+ @SuppressWarnings("unchecked")
+ static Object getValueFromPath(Map<String, Object> map, List<String> path) {
+ Map<String, Object> current = map;
+ for (int i = 0; i < path.size(); i++) {
+ Object value = current.get(path.get(i));
+ if (i == path.size() - 1) {
+ return value;
+ }
+ if (value == null) {
+ throw new IllegalStateException("Path " + String.join(".", path) + " was not found in " + map);
+ }
+ if (value instanceof Map<?, ?> next) {
+ current = (Map<String, Object>) next;
+ } else {
+ throw new IllegalStateException(
+ "Failed to
reach the end of the path " + + String.join(".", path) + + " last reachable field was " + + path.get(i) + + " in " + + map + ); + } + } + return current; + } } diff --git a/modules/data-streams/src/javaRestTest/resources/ecs-logs/es-agent-ecs-log.json b/modules/data-streams/src/javaRestTest/resources/ecs-logs/es-agent-ecs-log.json new file mode 100644 index 0000000000000..29ae669e1290d --- /dev/null +++ b/modules/data-streams/src/javaRestTest/resources/ecs-logs/es-agent-ecs-log.json @@ -0,0 +1,118 @@ +{ + "@timestamp": "2023-05-16T13:49:40.377Z", + "test": "elastic-agent-log", + "container": { + "image": { + "name": "docker.elastic.co/beats/elastic-agent:8.9.0-SNAPSHOT" + }, + "runtime": "containerd", + "id": "bdabf58305b2b537d06b85764c588ff659190d875cb5470214bc16ba50ea1a4d" + }, + "kubernetes": { + "container": { + "name": "elastic-agent" + }, + "node": { + "uid": "0f4dd3b8-0b29-418e-ad7a-ebc55bc279ff", + "hostname": "multi-v1.27.1-worker", + "name": "multi-v1.27.1-worker", + "labels": { + "kubernetes_io/hostname": "multi-v1.27.1-worker", + "beta_kubernetes_io/os": "linux", + "kubernetes_io/arch": "arm64", + "kubernetes_io/os": "linux", + "beta_kubernetes_io/arch": "arm64" + } + }, + "pod": { + "uid": "c91d1354-27cf-40f3-a2d6-e2b75aa96bf2", + "ip": "172.18.0.4", + "test_ip": "172.18.0.5", + "name": "elastic-agent-managed-daemonset-jwktj" + }, + "namespace": "kube-system", + "namespace_uid": "63294aeb-b23f-429d-827c-e793ccf91024", + "daemonset": { + "name": "elastic-agent-managed-daemonset" + }, + "namespace_labels": { + "kubernetes_io/metadata_name": "kube-system" + }, + "labels": { + "controller-revision-hash": "7ff74fcd4b", + "pod-template-generation": "1", + "k8s-app": "elastic-agent" + } + }, + "agent": { + "name": "multi-v1.27.1-worker", + "id": "230358e2-6c5d-4675-9069-04feaddad64b", + "ephemeral_id": "e0934bfb-7e35-4bcc-a935-803643841213", + "type": "filebeat", + "version": "8.9.0" + }, + "log": { + "file": { + "path": "/var/log/containers/elastic-agent-managed-daemonset-jwktj_kube-system_elastic-agent-bdabf58305b2b537d06b85764c588ff659190d875cb5470214bc16ba50ea1a4d.log" + }, + "offset": 635247 + }, + "elastic_agent": { + "id": "230358e2-6c5d-4675-9069-04feaddad64b", + "version": "8.9.0", + "snapshot": true + }, + "message": "{\"log.level\":\"info\",\"@timestamp\":\"2023-05-16T13:49:40.374Z\",\"message\":\"Non-zero metrics in the last 30s\",\"component\":{\"binary\":\"metricbeat\",\"dataset\":\"elastic_agent.metricbeat\",\"id\":\"kubernetes/metrics-a92ab320-f3ed-11ed-9c8d-45656839f031\",\"type\":\"kubernetes/metrics\"},\"log\":{\"source\":\"kubernetes/metrics-a92ab320-f3ed-11ed-9c8d-45656839f031\"},\"log.logger\":\"monitoring\",\"log.origin\":{\"file.line\":187,\"file.name\":\"log/log.go\"},\"service.name\":\"metricbeat\",\"ecs.version\":\"1.6.0\"}", + "orchestrator": { + "cluster": { + "name": "multi-v1.27.1", + "url": "multi-v1.27.1-control-plane:6443" + } + }, + "input": { + "type": "filestream" + }, + "ecs": { + "version": "8.0.0" + }, + "stream": "stderr", + "data_stream": { + "namespace": "default", + "dataset": "kubernetes.container_logs" + }, + "host": { + "hostname": "multi-v1.27.1-worker", + "os": { + "kernel": "5.15.49-linuxkit", + "codename": "focal", + "name": "Ubuntu", + "type": "linux", + "family": "debian", + "version": "20.04.6 LTS (Focal Fossa)", + "platform": "ubuntu" + }, + "ip": [ + "10.244.2.1", + "10.244.2.1", + "172.18.0.4", + "fc00:f853:ccd:e793::4", + "fe80::42:acff:fe12:4", + "172.21.0.9" + ], + "containerized": false, + "name": "multi-v1.27.1-worker", + 
"id": "b2c527655d7746328f0686e25d3c413a", + "mac": [ + "02-42-AC-12-00-04", + "02-42-AC-15-00-09", + "32-7E-AA-73-39-04", + "EA-F3-80-1D-88-E3" + ], + "architecture": "aarch64" + }, + "event": { + "agent_id_status": "verified", + "ingested": "2023-05-16T13:49:47Z", + "dataset": "kubernetes.container_logs" + } +} \ No newline at end of file diff --git a/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java b/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java index 43438bfe9e5fb..fa7b4ca1a80c0 100644 --- a/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java +++ b/modules/data-streams/src/yamlRestTest/java/org/elasticsearch/datastreams/DataStreamsClientYamlTestSuiteIT.java @@ -9,7 +9,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -20,7 +19,6 @@ import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.junit.ClassRule; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99764") public class DataStreamsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public DataStreamsClientYamlTestSuiteIT(final ClientYamlTestCandidate testCandidate) { diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/230_logs_message_pipeline.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/230_logs_message_pipeline.yml deleted file mode 100644 index 6fd6f24a4ea14..0000000000000 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/230_logs_message_pipeline.yml +++ /dev/null @@ -1,114 +0,0 @@ ---- -Test log message JSON-parsing pipeline: - - do: - ingest.put_pipeline: - # opting in to use the JSON parsing pipeline for message field - id: "logs@custom" - body: > - { - "processors": [ - { - "pipeline" : { - "name": "logs@json-message", - "description": "A pipeline that automatically parses JSON log events into top-level fields if they are such" - } - } - ] - } - - - do: - indices.create_data_stream: - name: logs-generic-default - - is_true: acknowledged - - - do: - index: - index: logs-generic-default - refresh: true - body: - '@timestamp': '2023-05-10' - message: |- - { - "@timestamp":"2023-05-09T16:48:34.135Z", - "message":"json", - "log.level": "INFO", - "ecs.version": "1.6.0", - "service.name":"my-app", - "event.dataset":"my-app.RollingFile", - "process.thread.name":"main", - "log.logger":"root.pkg.MyApp" - } - - match: {result: "created"} - - - do: - search: - index: logs-generic-default - body: - query: - term: - message: - value: 'json' - fields: - - field: 'message' - - length: { hits.hits: 1 } - # root field parsed from JSON should win - - match: { hits.hits.0._source.@timestamp: '2023-05-09T16:48:34.135Z' } - - match: { hits.hits.0._source.message: 'json' } - - match: { hits.hits.0.fields.message.0: 'json' } - # successful access to subfields verifies that dot expansion is part of the pipeline - - match: { hits.hits.0._source.log.level: 'INFO' } - - match: { hits.hits.0._source.ecs.version: '1.6.0' } - - match: { hits.hits.0._source.service.name: 'my-app' } - - match: { hits.hits.0._source.event.dataset: 'my-app.RollingFile' } 
- - match: { hits.hits.0._source.process.thread.name: 'main' } - - match: { hits.hits.0._source.log.logger: 'root.pkg.MyApp' } - # _tmp_json_message should be removed by the pipeline - - match: { hits.hits.0._source._tmp_json_message: null } - - # test malformed-JSON parsing - parsing error should be ignored and the document should be indexed with original message - - do: - index: - index: logs-generic-default - refresh: true - body: - '@timestamp': '2023-05-10' - test: 'malformed_json' - message: '{"@timestamp":"2023-05-09T16:48:34.135Z", "message":"malformed_json"}}' - - match: {result: "created"} - - - do: - search: - index: logs-generic-default - body: - query: - term: - test: - value: 'malformed_json' - - length: { hits.hits: 1 } - - match: { hits.hits.0._source.@timestamp: '2023-05-10' } - - match: { hits.hits.0._source.message: '{"@timestamp":"2023-05-09T16:48:34.135Z", "message":"malformed_json"}}' } - - match: { hits.hits.0._source._tmp_json_message: null } - - # test non-string message field - - do: - index: - index: logs-generic-default - refresh: true - body: - test: 'numeric_message' - message: 42 - - match: {result: "created"} - - - do: - search: - index: logs-generic-default - body: - query: - term: - test: - value: 'numeric_message' - fields: - - field: 'message' - - length: { hits.hits: 1 } - - match: { hits.hits.0._source.message: 42 } - - match: { hits.hits.0.fields.message.0: '42' } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml deleted file mode 100644 index 538e362ed9ec0..0000000000000 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml +++ /dev/null @@ -1,406 +0,0 @@ -setup: - - do: - ingest.put_pipeline: - # opting in to use the JSON parsing pipeline for message field - id: "logs@custom" - body: > - { - "processors": [ - { - "pipeline" : { - "name": "logs@json-message", - "description": "A pipeline that automatically parses JSON log events into top-level fields if they are such" - } - } - ] - } - - - do: - indices.create_data_stream: - name: logs-generic-default - ---- -Test Elastic Agent log ECS mappings: - - skip: - version: all - reason: https://github.com/elastic/elasticsearch/issues/97795 - - do: - indices.get_data_stream: - name: logs-generic-default - - set: { data_streams.0.indices.0.index_name: idx0name } - - - do: - index: - index: logs-generic-default - refresh: true - body: > - { - "@timestamp": "2023-05-16T13:49:40.377Z", - "test": "elastic-agent-log", - "container": { - "image": { - "name": "docker.elastic.co/beats/elastic-agent:8.9.0-SNAPSHOT" - }, - "runtime": "containerd", - "id": "bdabf58305b2b537d06b85764c588ff659190d875cb5470214bc16ba50ea1a4d" - }, - "kubernetes": { - "container": { - "name": "elastic-agent" - }, - "node": { - "uid": "0f4dd3b8-0b29-418e-ad7a-ebc55bc279ff", - "hostname": "multi-v1.27.1-worker", - "name": "multi-v1.27.1-worker", - "labels": { - "kubernetes_io/hostname": "multi-v1.27.1-worker", - "beta_kubernetes_io/os": "linux", - "kubernetes_io/arch": "arm64", - "kubernetes_io/os": "linux", - "beta_kubernetes_io/arch": "arm64" - } - }, - "pod": { - "uid": "c91d1354-27cf-40f3-a2d6-e2b75aa96bf2", - "ip": "172.18.0.4", - "test_ip": "172.18.0.5", - "name": "elastic-agent-managed-daemonset-jwktj" - }, - "namespace": "kube-system", - "namespace_uid": "63294aeb-b23f-429d-827c-e793ccf91024", - "daemonset": 
{ - "name": "elastic-agent-managed-daemonset" - }, - "namespace_labels": { - "kubernetes_io/metadata_name": "kube-system" - }, - "labels": { - "controller-revision-hash": "7ff74fcd4b", - "pod-template-generation": "1", - "k8s-app": "elastic-agent" - } - }, - "agent": { - "name": "multi-v1.27.1-worker", - "id": "230358e2-6c5d-4675-9069-04feaddad64b", - "ephemeral_id": "e0934bfb-7e35-4bcc-a935-803643841213", - "type": "filebeat", - "version": "8.9.0" - }, - "log": { - "file": { - "path": "/var/log/containers/elastic-agent-managed-daemonset-jwktj_kube-system_elastic-agent-bdabf58305b2b537d06b85764c588ff659190d875cb5470214bc16ba50ea1a4d.log" - }, - "offset": 635247 - }, - "elastic_agent": { - "id": "230358e2-6c5d-4675-9069-04feaddad64b", - "version": "8.9.0", - "snapshot": true - }, - "message": "{\"log.level\":\"info\",\"@timestamp\":\"2023-05-16T13:49:40.374Z\",\"message\":\"Non-zero metrics in the last 30s\",\"component\":{\"binary\":\"metricbeat\",\"dataset\":\"elastic_agent.metricbeat\",\"id\":\"kubernetes/metrics-a92ab320-f3ed-11ed-9c8d-45656839f031\",\"type\":\"kubernetes/metrics\"},\"log\":{\"source\":\"kubernetes/metrics-a92ab320-f3ed-11ed-9c8d-45656839f031\"},\"log.logger\":\"monitoring\",\"log.origin\":{\"file.line\":187,\"file.name\":\"log/log.go\"},\"service.name\":\"metricbeat\",\"ecs.version\":\"1.6.0\"}", - "orchestrator": { - "cluster": { - "name": "multi-v1.27.1", - "url": "multi-v1.27.1-control-plane:6443" - } - }, - "input": { - "type": "filestream" - }, - "ecs": { - "version": "8.0.0" - }, - "stream": "stderr", - "data_stream": { - "namespace": "default", - "dataset": "kubernetes.container_logs" - }, - "host": { - "hostname": "multi-v1.27.1-worker", - "os": { - "kernel": "5.15.49-linuxkit", - "codename": "focal", - "name": "Ubuntu", - "type": "linux", - "family": "debian", - "version": "20.04.6 LTS (Focal Fossa)", - "platform": "ubuntu" - }, - "ip": [ - "10.244.2.1", - "10.244.2.1", - "172.18.0.4", - "fc00:f853:ccd:e793::4", - "fe80::42:acff:fe12:4", - "172.21.0.9" - ], - "containerized": false, - "name": "multi-v1.27.1-worker", - "id": "b2c527655d7746328f0686e25d3c413a", - "mac": [ - "02-42-AC-12-00-04", - "02-42-AC-15-00-09", - "32-7E-AA-73-39-04", - "EA-F3-80-1D-88-E3" - ], - "architecture": "aarch64" - }, - "event": { - "agent_id_status": "verified", - "ingested": "2023-05-16T13:49:47Z", - "dataset": "kubernetes.container_logs" - } - } - - match: {result: "created"} - - - do: - search: - index: logs-generic-default - body: - query: - term: - test: - value: 'elastic-agent-log' - fields: - - field: 'message' - - length: { hits.hits: 1 } - # timestamp from deserialized JSON message field should win - - match: { hits.hits.0._source.@timestamp: '2023-05-16T13:49:40.374Z' } - - match: { hits.hits.0._source.kubernetes.pod.name: 'elastic-agent-managed-daemonset-jwktj' } - # expecting the extracted message from within the original JSON-formatted message - - match: { hits.hits.0.fields.message.0: 'Non-zero metrics in the last 30s' } - - - do: - indices.get_mapping: - index: logs-generic-default - - match: { .$idx0name.mappings.properties.@timestamp.type: "date" } - - match: { .$idx0name.mappings.properties.message.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.kubernetes.properties.pod.properties.name.type: "keyword" } - - match: { .$idx0name.mappings.properties.kubernetes.properties.pod.properties.ip.type: "ip" } - - match: { .$idx0name.mappings.properties.kubernetes.properties.pod.properties.test_ip.type: "ip" } - - match: { 
.$idx0name.mappings.properties.kubernetes.properties.labels.properties.pod-template-generation.type: "keyword" } - - match: { .$idx0name.mappings.properties.log.properties.file.properties.path.type: "keyword" } - - match: { .$idx0name.mappings.properties.log.properties.file.properties.path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.host.properties.os.properties.name.type: "keyword" } - - match: { .$idx0name.mappings.properties.host.properties.os.properties.name.fields.text.type: "match_only_text" } - ---- -Test general mockup ECS mappings: - - do: - indices.get_data_stream: - name: logs-generic-default - - set: { data_streams.0.indices.0.index_name: idx0name } - - - do: - index: - index: logs-generic-default - refresh: true - body: > - { - "start_timestamp": "not a date", - "start-timestamp": "not a date", - "timestamp.us": 1688550340718000, - "test": "mockup-ecs-log", - "registry": { - "data": { - "strings": ["C:\\rta\\red_ttp\\bin\\myapp.exe"] - } - }, - "process": { - "title": "ssh", - "executable": "/usr/bin/ssh", - "name": "ssh", - "command_line": "/usr/bin/ssh -l user 10.0.0.16", - "working_directory": "/home/ekoren", - "io": { - "text": "test" - } - }, - "url": { - "path": "/page", - "full": "https://mydomain.com/app/page", - "original": "https://mydomain.com/app/original" - }, - "email": { - "message_id": "81ce15$8r2j59@mail01.example.com" - }, - "parent": { - "url": { - "path": "/page", - "full": "https://mydomain.com/app/page", - "original": "https://mydomain.com/app/original" - }, - "body": { - "content": "Some content" - }, - "file": { - "path": "/path/to/my/file", - "target_path": "/path/to/my/file" - }, - "code_signature.timestamp": "2023-07-05", - "registry.data.strings": ["C:\\rta\\red_ttp\\bin\\myapp.exe"] - }, - "error": { - "stack_trace": "co.elastic.test.TestClass error:\n at co.elastic.test.BaseTestClass", - "message": "Error occurred" - }, - "file": { - "path": "/path/to/my/file", - "target_path": "/path/to/my/file" - }, - "os": { - "full": "Mac OS Mojave" - }, - "user_agent": { - "original": "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15" - }, - "user": { - "full_name": "John Doe" - }, - "vulnerability": { - "score": { - "base": 5.5, - "temporal": 5.5, - "version": "2.0" - }, - "textual_score": "bad" - }, - "host": { - "cpu": { - "usage": 0.68 - } - }, - "geo": { - "location": { - "lon": -73.614830, - "lat": 45.505918 - } - }, - "data_stream": { - "dataset": "nginx.access", - "namespace": "production", - "custom": "whatever" - }, - "structured_data": { - "key1": "value1", - "key2": ["value2", "value3"] - }, - "exports": { - "key": "value" - }, - "top_level_imports": { - "key": "value" - }, - "nested": { - "imports": { - "key": "value" - } - }, - "numeric_as_string": "42", - "socket": { - "ip": "127.0.0.1", - "remote_ip": "187.8.8.8" - } - } - - match: {result: "created"} - - - do: - search: - index: logs-generic-default - body: - query: - term: - test: - value: 'mockup-ecs-log' - fields: - - field: 'start_timestamp' - - field: 'start-timestamp' - script_fields: - data_stream_type: - script: - source: "doc['data_stream.type'].value" - - length: { hits.hits: 1 } - # the ECS date dynamic template enforces mapping of "*_timestamp" fields to a date type - - length: { hits.hits.0._ignored: 2 } - - match: { hits.hits.0._ignored.0: 'start_timestamp' } - - length: { hits.hits.0.ignored_field_values.start_timestamp: 1 } - - match: { hits.hits.0.ignored_field_values.start_timestamp.0: 'not a date' } - # 
"start-timestamp" doesn't match the ECS dynamic mapping pattern "*_timestamp" - - match: { hits.hits.0.fields.start-timestamp.0: 'not a date' } - # verify that data_stream.type has the correct constant_keyword value - - match: { hits.hits.0.fields.data_stream_type.0: 'logs' } - - match: { hits.hits.0._ignored.1: 'vulnerability.textual_score' } - - - do: - indices.get_mapping: - index: logs-generic-default - - match: { .$idx0name.mappings.properties.error.properties.message.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.registry.properties.data.properties.strings.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent.properties.registry.properties.data.properties.strings.type: "wildcard" } - - match: { .$idx0name.mappings.properties.process.properties.io.properties.text.type: "wildcard" } - - match: { .$idx0name.mappings.properties.email.properties.message_id.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url.properties.path.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent.properties.url.properties.path.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url.properties.full.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url.properties.full.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent.properties.url.properties.full.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent.properties.url.properties.full.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.url.properties.original.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url.properties.original.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent.properties.url.properties.original.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent.properties.url.properties.original.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent.properties.body.properties.content.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent.properties.body.properties.content.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process.properties.command_line.type: "wildcard" } - - match: { .$idx0name.mappings.properties.process.properties.command_line.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.error.properties.stack_trace.type: "wildcard" } - - match: { .$idx0name.mappings.properties.error.properties.stack_trace.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.file.properties.path.type: "keyword" } - - match: { .$idx0name.mappings.properties.file.properties.path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent.properties.file.properties.path.type: "keyword" } - - match: { .$idx0name.mappings.properties.parent.properties.file.properties.path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.file.properties.target_path.type: "keyword" } - - match: { .$idx0name.mappings.properties.file.properties.target_path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent.properties.file.properties.target_path.type: "keyword" } - - match: { .$idx0name.mappings.properties.parent.properties.file.properties.target_path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.os.properties.full.type: "keyword" } - - match: { 
.$idx0name.mappings.properties.os.properties.full.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.user_agent.properties.original.type: "keyword" } - - match: { .$idx0name.mappings.properties.user_agent.properties.original.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process.properties.title.type: "keyword" } - - match: { .$idx0name.mappings.properties.process.properties.title.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process.properties.executable.type: "keyword" } - - match: { .$idx0name.mappings.properties.process.properties.executable.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process.properties.name.type: "keyword" } - - match: { .$idx0name.mappings.properties.process.properties.name.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process.properties.working_directory.type: "keyword" } - - match: { .$idx0name.mappings.properties.process.properties.working_directory.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.user.properties.full_name.type: "keyword" } - - match: { .$idx0name.mappings.properties.user.properties.full_name.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.start_timestamp.type: "date" } - # testing the default mapping of string input fields to keyword if not matching any pattern - - match: { .$idx0name.mappings.properties.start-timestamp.type: "keyword" } - - match: { .$idx0name.mappings.properties.timestamp.properties.us.type: "long" } - - match: { .$idx0name.mappings.properties.parent.properties.code_signature.properties.timestamp.type: "date" } - - match: { .$idx0name.mappings.properties.vulnerability.properties.score.properties.base.type: "float" } - - match: { .$idx0name.mappings.properties.vulnerability.properties.score.properties.temporal.type: "float" } - - match: { .$idx0name.mappings.properties.vulnerability.properties.score.properties.version.type: "keyword" } - - match: { .$idx0name.mappings.properties.vulnerability.properties.textual_score.type: "float" } - - match: { .$idx0name.mappings.properties.host.properties.cpu.properties.usage.type: "scaled_float" } - - match: { .$idx0name.mappings.properties.host.properties.cpu.properties.usage.scaling_factor: 1000 } - - match: { .$idx0name.mappings.properties.geo.properties.location.type: "geo_point" } - - match: { .$idx0name.mappings.properties.data_stream.properties.dataset.type: "constant_keyword" } - - match: { .$idx0name.mappings.properties.data_stream.properties.namespace.type: "constant_keyword" } - - match: { .$idx0name.mappings.properties.data_stream.properties.type.type: "constant_keyword" } - # not one of the three data_stream fields that are explicitly mapped to constant_keyword - - match: { .$idx0name.mappings.properties.data_stream.properties.custom.type: "keyword" } - - match: { .$idx0name.mappings.properties.structured_data.type: "flattened" } - - match: { .$idx0name.mappings.properties.exports.type: "flattened" } - - match: { .$idx0name.mappings.properties.top_level_imports.type: "flattened" } - - match: { .$idx0name.mappings.properties.nested.properties.imports.type: "flattened" } - # verifying the default mapping for strings into keyword, overriding the automatic numeric string detection - - match: { .$idx0name.mappings.properties.numeric_as_string.type: "keyword" } - - match: { .$idx0name.mappings.properties.socket.properties.ip.type: "ip" } - - 
match: { .$idx0name.mappings.properties.socket.properties.remote_ip.type: "ip" } - diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/250_logs_no_subobjects.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/250_logs_no_subobjects.yml deleted file mode 100644 index 607693e9f9955..0000000000000 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/250_logs_no_subobjects.yml +++ /dev/null @@ -1,218 +0,0 @@ ---- -Test flattened document with subobjects-false: -# NOTE: this doesn't work. In order to run this test set "subobjects: false" through logs-mappings.json - - skip: - features: allowed_warnings - - - do: - cluster.put_component_template: - name: logs-test-subobjects-mappings - body: - template: - settings: - mapping: - ignore_malformed: true - mappings: - subobjects: false - date_detection: false - properties: - data_stream.type: - type: constant_keyword - value: logs - data_stream.dataset: - type: constant_keyword - data_stream.namespace: - type: constant_keyword - - - do: - allowed_warnings: - - "index template [logs-ecs-test-template] has index patterns [logs-*-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [logs-ecs-test-template] will take precedence during new index creation" - indices.put_index_template: - name: logs-ecs-test-template - body: - priority: 200 - data_stream: {} - index_patterns: - - logs-*-* - composed_of: - - logs-test-subobjects-mappings - - ecs@dynamic_templates - - - do: - indices.create_data_stream: - name: logs-ecs-test-subobjects - - is_true: acknowledged - - - do: - indices.get_data_stream: - name: logs-ecs-test-subobjects - - set: { data_streams.0.indices.0.index_name: idx0name } - - - do: - index: - index: logs-ecs-test-subobjects - refresh: true - body: > - { - "@timestamp": "2023-06-12", - "start_timestamp": "2023-06-08", - "location" : "POINT (-71.34 41.12)", - "test": "flattened", - "test.start_timestamp": "not a date", - "test.start-timestamp": "not a date", - "registry.data.strings": ["C:\\rta\\red_ttp\\bin\\myapp.exe"], - "process.title": "ssh", - "process.executable": "/usr/bin/ssh", - "process.name": "ssh", - "process.command_line": "/usr/bin/ssh -l user 10.0.0.16", - "process.working_directory": "/home/ekoren", - "process.io.text": "test", - "url.path": "/page", - "url.full": "https://mydomain.com/app/page", - "url.original": "https://mydomain.com/app/original", - "email.message_id": "81ce15$8r2j59@mail01.example.com", - "parent.url.path": "/page", - "parent.url.full": "https://mydomain.com/app/page", - "parent.url.original": "https://mydomain.com/app/original", - "parent.body.content": "Some content", - "parent.file.path": "/path/to/my/file", - "parent.file.target_path": "/path/to/my/file", - "parent.registry.data.strings": ["C:\\rta\\red_ttp\\bin\\myapp.exe"], - "error.stack_trace": "co.elastic.test.TestClass error:\n at co.elastic.test.BaseTestClass", - "error.message": "Error occurred", - "file.path": "/path/to/my/file", - "file.target_path": "/path/to/my/file", - "os.full": "Mac OS Mojave", - "user_agent.original": "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15", - "user.full_name": "John Doe", - "vulnerability.score.base": 5.5, - "vulnerability.score.temporal": 5.5, - "vulnerability.score.version": "2.0", - "vulnerability.textual_score": "bad", - "host.cpu.usage": 0.68, - "geo.location": [-73.614830, 45.505918], - "data_stream.dataset": 
"nginx.access", - "data_stream.namespace": "production", - "data_stream.custom": "whatever", - "structured_data": {"key1": "value1", "key2": ["value2", "value3"]}, - "exports": {"key": "value"}, - "top_level_imports": {"key": "value"}, - "nested.imports": {"key": "value"}, - "numeric_as_string": "42", - "socket.ip": "127.0.0.1", - "socket.remote_ip": "187.8.8.8" - } - - match: {result: "created"} - - - do: - search: - index: logs-ecs-test-subobjects - body: - query: - term: - test: - value: 'flattened' - fields: - - field: 'data_stream.type' - - field: 'location' - - field: 'geo.location' - - field: 'test.start-timestamp' - - field: 'test.start_timestamp' - - field: 'vulnerability.textual_score' - - length: { hits.hits: 1 } - # verify that data_stream.type has the correct constant_keyword value - - match: { hits.hits.0.fields.data_stream\.type.0: 'logs' } - # verify geo_point subfields evaluation - - match: { hits.hits.0.fields.location.0.type: 'Point' } - - length: { hits.hits.0.fields.location.0.coordinates: 2 } - - match: { hits.hits.0.fields.location.0.coordinates.0: -71.34 } - - match: { hits.hits.0.fields.location.0.coordinates.1: 41.12 } - - match: { hits.hits.0.fields.geo\.location.0.type: 'Point' } - - length: { hits.hits.0.fields.geo\.location.0.coordinates: 2 } - - match: { hits.hits.0.fields.geo\.location.0.coordinates.0: -73.614830 } - - match: { hits.hits.0.fields.geo\.location.0.coordinates.1: 45.505918 } - # "start-timestamp" doesn't match the ECS dynamic mapping pattern "*_timestamp" - # TODO: uncomment once https://github.com/elastic/elasticsearch/issues/96700 gets resolved - # - match: { hits.hits.0.fields.test\.start-timestamp.0: 'not a date' } - - length: { hits.hits.0._ignored: 2 } - - match: { hits.hits.0._ignored.0: 'vulnerability.textual_score' } - # the ECS date dynamic template enforces mapping of "*_timestamp" fields to a date type - - match: { hits.hits.0._ignored.1: 'test.start_timestamp' } - - length: { hits.hits.0.ignored_field_values.test\.start_timestamp: 1 } - # TODO: uncomment once https://github.com/elastic/elasticsearch/issues/96700 gets resolved - # - match: { hits.hits.0.ignored_field_values.test\.start_timestamp.0: 'not a date' } - - length: { hits.hits.0.ignored_field_values.vulnerability\.textual_score: 1 } - - match: { hits.hits.0.ignored_field_values.vulnerability\.textual_score.0: 'bad' } - - - do: - indices.get_mapping: - index: logs-ecs-test-subobjects - - match: { .$idx0name.mappings.properties.error\.message.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.registry\.data\.strings.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent\.registry\.data\.strings.type: "wildcard" } - - match: { .$idx0name.mappings.properties.process\.io\.text.type: "wildcard" } - - match: { .$idx0name.mappings.properties.email\.message_id.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url\.path.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent\.url\.path.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url\.full.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url\.full.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent\.url\.full.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent\.url\.full.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.url\.original.type: "wildcard" } - - match: { .$idx0name.mappings.properties.url\.original.fields.text.type: "match_only_text" } - - 
match: { .$idx0name.mappings.properties.parent\.url\.original.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent\.url\.original.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent\.body\.content.type: "wildcard" } - - match: { .$idx0name.mappings.properties.parent\.body\.content.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process\.command_line.type: "wildcard" } - - match: { .$idx0name.mappings.properties.process\.command_line.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.error\.stack_trace.type: "wildcard" } - - match: { .$idx0name.mappings.properties.error\.stack_trace.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.file\.path.type: "keyword" } - - match: { .$idx0name.mappings.properties.file\.path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent\.file\.path.type: "keyword" } - - match: { .$idx0name.mappings.properties.parent\.file\.path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.file\.target_path.type: "keyword" } - - match: { .$idx0name.mappings.properties.file\.target_path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.parent\.file\.target_path.type: "keyword" } - - match: { .$idx0name.mappings.properties.parent\.file\.target_path.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.os\.full.type: "keyword" } - - match: { .$idx0name.mappings.properties.os\.full.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.user_agent\.original.type: "keyword" } - - match: { .$idx0name.mappings.properties.user_agent\.original.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process\.title.type: "keyword" } - - match: { .$idx0name.mappings.properties.process\.title.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process\.executable.type: "keyword" } - - match: { .$idx0name.mappings.properties.process\.executable.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process\.name.type: "keyword" } - - match: { .$idx0name.mappings.properties.process\.name.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.process\.working_directory.type: "keyword" } - - match: { .$idx0name.mappings.properties.process\.working_directory.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.user\.full_name.type: "keyword" } - - match: { .$idx0name.mappings.properties.user\.full_name.fields.text.type: "match_only_text" } - - match: { .$idx0name.mappings.properties.start_timestamp.type: "date" } - - match: { .$idx0name.mappings.properties.test\.start_timestamp.type: "date" } - # testing the default mapping of string input fields to keyword if not matching any pattern - - match: { .$idx0name.mappings.properties.test\.start-timestamp.type: "keyword" } - - match: { .$idx0name.mappings.properties.vulnerability\.score\.base.type: "float" } - - match: { .$idx0name.mappings.properties.vulnerability\.score\.temporal.type: "float" } - - match: { .$idx0name.mappings.properties.vulnerability\.score\.version.type: "keyword" } - - match: { .$idx0name.mappings.properties.vulnerability\.textual_score.type: "float" } - - match: { .$idx0name.mappings.properties.host\.cpu\.usage.type: "scaled_float" } - - match: { 
.$idx0name.mappings.properties.host\.cpu\.usage.scaling_factor: 1000 } - - match: { .$idx0name.mappings.properties.location.type: "geo_point" } - - match: { .$idx0name.mappings.properties.geo\.location.type: "geo_point" } - - match: { .$idx0name.mappings.properties.data_stream\.dataset.type: "constant_keyword" } - - match: { .$idx0name.mappings.properties.data_stream\.namespace.type: "constant_keyword" } - - match: { .$idx0name.mappings.properties.data_stream\.type.type: "constant_keyword" } - # not one of the three data_stream fields that are explicitly mapped to constant_keyword - - match: { .$idx0name.mappings.properties.data_stream\.custom.type: "keyword" } - - match: { .$idx0name.mappings.properties.structured_data.type: "flattened" } - - match: { .$idx0name.mappings.properties.exports.type: "flattened" } - - match: { .$idx0name.mappings.properties.top_level_imports.type: "flattened" } - - match: { .$idx0name.mappings.properties.nested\.imports.type: "flattened" } - # verifying the default mapping for strings into keyword, overriding the automatic numeric string detection - - match: { .$idx0name.mappings.properties.numeric_as_string.type: "keyword" } - - match: { .$idx0name.mappings.properties.socket\.ip.type: "ip" } - - match: { .$idx0name.mappings.properties.socket\.remote_ip.type: "ip" } - diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml index 296c692fa2d49..1ea39087211dd 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml @@ -51,6 +51,9 @@ setup: --- "Get data stream with default lifecycle": + - skip: + version: all + reason: https://github.com/elastic/elasticsearch/pull/100187 - do: indices.get_data_lifecycle: From 24037d6ed682f55f94cb1417f5d6fa8489cffa11 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 11 Oct 2023 14:33:01 +0300 Subject: [PATCH 153/176] Exclude synthetic source test for TSDB from mixedClusterTests (#100592) * Don't print synthetic source in mapping for bwc tests * Move comment. * Don't print synthetic source in mapping for bwc tests #2 * Don't print synthetic source in mapping for bwc tests #2 * Revert "Don't print synthetic source in mapping for bwc tests #2" This reverts commit 034262c5d22229aa6e8a0b7e754fd806a521cfc4. * Revert "Don't print synthetic source in mapping for bwc tests #2" This reverts commit 44e815635e2565c0b042cfe558a7451226c89488. * Revert "Don't print synthetic source in mapping for bwc tests (#100572)" This reverts commit 9322ab9b9163f70c9bf832f1b0a1985121393cfe. * Exclude synthetic source test from mixedClusterTests * Update comment. 
---
qa/mixed-cluster/build.gradle | 6 ++++
.../index/mapper/SourceFieldMapper.java | 34 +++++++++----------
.../index/mapper/SourceFieldMapperTests.java | 8 -----
.../query/SearchExecutionContextTests.java | 2 +-
4 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle
index 08d64e2b9353b..13256179b0a2b 100644
--- a/qa/mixed-cluster/build.gradle
+++ b/qa/mixed-cluster/build.gradle
@@ -41,6 +41,12 @@ excludeList.add('aggregations/filter/Standard queries get cached')
excludeList.add('aggregations/filter/Terms lookup gets cached')
excludeList.add('aggregations/filters_bucket/cache hits')
+// The test checks that tsdb mappings report source as synthetic.
+// It is supposed to be skipped (not needed) for versions before
+// 8.10 but mixed cluster tests may not respect that - see the
+// comment above.
+excludeList.add('tsdb/20_mapping/Synthetic source')
+
BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
if (bwcVersion != VersionProperties.getElasticsearchVersion()) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
index aeab22a6f5f35..c5d5dbec1ef15 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java
@@ -101,7 +101,20 @@ public static class Builder extends MetadataFieldMapper.Builder {
(previous, current, conflicts) -> (previous.value() == current.value()) || (previous.value() && current.value() == false)
);
- private final Parameter<Mode> mode;
+ /*
+ * The default mode for TimeSeries is left empty on purpose, so that mapping printings include the synthetic
+ * source mode.
+ */
+ private final Parameter<Mode> mode = new Parameter<>(
+ "mode",
+ true,
+ () -> null,
+ (n, c, o) -> Mode.valueOf(o.toString().toUpperCase(Locale.ROOT)),
+ m -> toType(m).enabled.explicit() ? null : toType(m).mode,
+ (b, n, v) -> b.field(n, v.toString().toLowerCase(Locale.ROOT)),
+ v -> v.toString().toLowerCase(Locale.ROOT)
+ ).setMergeValidator((previous, current, conflicts) -> (previous == current) || current != Mode.STORED)
+ .setSerializerCheck((includeDefaults, isConfigured, value) -> value != null); // don't emit if `enabled` is configured
private final Parameter<List<String>> includes = Parameter.stringArrayParam(
"includes",
false,
@@ -115,22 +128,9 @@ public static class Builder extends MetadataFieldMapper.Builder {
private final IndexMode indexMode;
- public Builder(IndexMode indexMode, IndexVersion indexVersion) {
+ public Builder(IndexMode indexMode) {
super(Defaults.NAME);
this.indexMode = indexMode;
- this.mode = new Parameter<>(
- "mode",
- true,
- // The default mode for TimeSeries is left empty on purpose, so that mapping printings include the synthetic source mode.
- () -> getIndexMode() == IndexMode.TIME_SERIES && indexVersion.between(IndexVersion.V_8_7_0, IndexVersion.V_8_10_0)
- ? Mode.SYNTHETIC
- : null,
- (n, c, o) -> Mode.valueOf(o.toString().toUpperCase(Locale.ROOT)),
- m -> toType(m).enabled.explicit() ?
null : toType(m).mode, - (b, n, v) -> b.field(n, v.toString().toLowerCase(Locale.ROOT)), - v -> v.toString().toLowerCase(Locale.ROOT) - ).setMergeValidator((previous, current, conflicts) -> (previous == current) || current != Mode.STORED) - .setSerializerCheck((includeDefaults, isConfigured, value) -> value != null); // don't emit if `enabled` is configured } public Builder setSynthetic() { @@ -188,7 +188,7 @@ private IndexMode getIndexMode() { c -> c.getIndexSettings().getMode() == IndexMode.TIME_SERIES ? c.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersion.V_8_7_0) ? TSDB_DEFAULT : TSDB_LEGACY_DEFAULT : DEFAULT, - c -> new Builder(c.getIndexSettings().getMode(), c.getIndexSettings().getIndexVersionCreated()) + c -> new Builder(c.getIndexSettings().getMode()) ); static final class SourceFieldType extends MappedFieldType { @@ -313,7 +313,7 @@ protected String contentType() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(indexMode, IndexVersion.current()).init(this); + return new Builder(indexMode).init(this); } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index 433ebc467483d..f683cb60c87c3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -12,8 +12,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; @@ -240,10 +238,4 @@ public void testSyntheticSourceInTimeSeries() throws IOException { assertTrue(mapper.sourceMapper().isSynthetic()); assertEquals("{\"_source\":{\"mode\":\"synthetic\"}}", mapper.sourceMapper().toString()); } - - public void testSyntheticSourceInTimeSeriesBwc() throws IOException { - SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(IndexMode.TIME_SERIES, IndexVersion.V_8_8_0).build(); - assertTrue(sourceMapper.isSynthetic()); - assertEquals("{\"_source\":{\"mode\":\"synthetic\"}}", sourceMapper.toString()); - } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 9df1dc24c2793..6d671a258c26a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -381,7 +381,7 @@ public void testSearchRequestRuntimeFieldsAndMultifieldDetection() { public void testSyntheticSourceSearchLookup() throws IOException { // Build a mapping using synthetic source - SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null, IndexVersion.current()).setSynthetic().build(); + SourceFieldMapper sourceMapper = new SourceFieldMapper.Builder(null).setSynthetic().build(); RootObjectMapper root = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add( new KeywordFieldMapper.Builder("cat", IndexVersion.current()).ignoreAbove(100) ).build(MapperBuilderContext.root(true, false)); From 4c200aede09983e7c6e3f59973042d5e24231386 Mon Sep 17 00:00:00 2001 From: 
Ed Savage Date: Wed, 11 Oct 2023 13:16:39 +0100
Subject: [PATCH 154/176] [ML] Adjust AutodetectMemoryLimitIT/testManyDistinctOverFields (#100667)

Adjust the AutodetectMemoryLimitIT/testManyDistinctOverFields integration test to account for changed memory consumption due to the upgrade of Boost.

---
.../xpack/ml/integration/AutodetectMemoryLimitIT.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
index 5405852173a62..4b0783dda84cc 100644
--- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
+++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java
@@ -206,7 +206,7 @@ public void testManyDistinctOverFields() throws Exception {
int user = 0;
while (timestamp < now) {
List<String> data = new ArrayList<>();
- for (int i = 0; i < 10000; i++) {
+ for (int i = 0; i < 20000; i++) {
// It's important that the values used here are either always represented in less than 16 UTF-8 bytes or
// always represented in more than 22 UTF-8 bytes. Otherwise platform differences in when the small string
// optimisation is used will make the results of this test very different for the different platforms.

From 9fb550be44067ac9b484cb73af5a04521b6d0122 Mon Sep 17 00:00:00 2001
From: Ignacio Vera Date: Wed, 11 Oct 2023 14:24:32 +0200
Subject: [PATCH 155/176] WellKnownBinary#toWKB should not throw an IOException (#100669)

The only reason this method throws an exception is that ByteArrayOutputStream#close() declares it, although it is a no-op; therefore it can be safely ignored. Thanks @romseygeek for bringing this to our attention.
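To make the pattern concrete, here is a minimal, hypothetical sketch (not part of the patch) of the idiom this commit adopts: ByteArrayOutputStream#close() declares IOException only because it overrides Closeable#close(); the implementation is a no-op, so the checked exception can be translated into an UncheckedIOException instead of leaking into the method signature. The NoopCloseSketch class and render() helper below are illustrative names, not code from the repository.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;

class NoopCloseSketch {
    // Hypothetical helper: the catch clause is required only because
    // ByteArrayOutputStream#close() declares IOException; an in-memory
    // stream never actually throws it.
    static byte[] render(byte[] payload) {
        try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
            out.write(payload, 0, payload.length); // in-memory write, cannot fail
            return out.toByteArray();
        } catch (IOException e) {
            // Should never happen: ByteArrayOutputStream#close() is a no-op.
            throw new UncheckedIOException(e);
        }
    }
}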
--- .../geometry/utils/WellKnownBinary.java | 6 +++- .../geometry/utils/WKBTests.java | 33 +++++++++---------- .../mapper/LegacyGeoShapeFieldMapper.java | 2 +- .../index/mapper/GeoShapeFieldMapper.java | 3 +- .../GeoShapeWithDocValuesFieldMapper.java | 2 +- .../index/mapper/PointFieldMapper.java | 2 +- .../index/mapper/ShapeFieldMapper.java | 2 +- 7 files changed, 26 insertions(+), 24 deletions(-) diff --git a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownBinary.java b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownBinary.java index 9ded2106b2500..526a621674f6b 100644 --- a/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownBinary.java +++ b/libs/geo/src/main/java/org/elasticsearch/geometry/utils/WellKnownBinary.java @@ -24,6 +24,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.ArrayList; @@ -40,10 +41,13 @@ private WellKnownBinary() {} /** * Converts the given {@link Geometry} to WKB with the provided {@link ByteOrder} */ - public static byte[] toWKB(Geometry geometry, ByteOrder byteOrder) throws IOException { + public static byte[] toWKB(Geometry geometry, ByteOrder byteOrder) { try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { toWKB(geometry, outputStream, ByteBuffer.allocate(8).order(byteOrder)); return outputStream.toByteArray(); + } catch (IOException ioe) { + // Should never happen as the only method throwing IOException is ByteArrayOutputStream#close and it is a NOOP + throw new UncheckedIOException(ioe); } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java index 9ede3d9db8126..5369475e4ed4f 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/WKBTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.nio.ByteOrder; import java.util.ArrayList; import java.util.List; @@ -35,47 +34,47 @@ public void testEmptyPoint() { assertEquals("Empty POINT cannot be represented in WKB", ex.getMessage()); } - public void testPoint() throws IOException { + public void testPoint() { Point point = GeometryTestUtils.randomPoint(randomBoolean()); assertWKB(point); } - public void testEmptyMultiPoint() throws IOException { + public void testEmptyMultiPoint() { MultiPoint multiPoint = MultiPoint.EMPTY; assertWKB(multiPoint); } - public void testMultiPoint() throws IOException { + public void testMultiPoint() { MultiPoint multiPoint = GeometryTestUtils.randomMultiPoint(randomBoolean()); assertWKB(multiPoint); } - public void testEmptyLine() throws IOException { + public void testEmptyLine() { Line line = Line.EMPTY; assertWKB(line); } - public void testLine() throws IOException { + public void testLine() { Line line = GeometryTestUtils.randomLine(randomBoolean()); assertWKB(line); } - public void tesEmptyMultiLine() throws IOException { + public void tesEmptyMultiLine() { MultiLine multiLine = MultiLine.EMPTY; assertWKB(multiLine); } - public void testMultiLine() throws IOException { + public void testMultiLine() { MultiLine multiLine = GeometryTestUtils.randomMultiLine(randomBoolean()); assertWKB(multiLine); } - public void testEmptyPolygon() throws IOException { + public void testEmptyPolygon() { Polygon 
polygon = Polygon.EMPTY; assertWKB(polygon); } - public void testPolygon() throws IOException { + public void testPolygon() { final boolean hasZ = randomBoolean(); Polygon polygon = GeometryTestUtils.randomPolygon(hasZ); if (randomBoolean()) { @@ -89,22 +88,22 @@ public void testPolygon() throws IOException { assertWKB(polygon); } - public void testEmptyMultiPolygon() throws IOException { + public void testEmptyMultiPolygon() { MultiPolygon multiPolygon = MultiPolygon.EMPTY; assertWKB(multiPolygon); } - public void testMultiPolygon() throws IOException { + public void testMultiPolygon() { MultiPolygon multiPolygon = GeometryTestUtils.randomMultiPolygon(randomBoolean()); assertWKB(multiPolygon); } - public void testEmptyGeometryCollection() throws IOException { + public void testEmptyGeometryCollection() { GeometryCollection collection = GeometryCollection.EMPTY; assertWKB(collection); } - public void testGeometryCollection() throws IOException { + public void testGeometryCollection() { GeometryCollection collection = GeometryTestUtils.randomGeometryCollection(randomBoolean()); assertWKB(collection); } @@ -115,7 +114,7 @@ public void testEmptyCircle() { assertEquals("Empty CIRCLE cannot be represented in WKB", ex.getMessage()); } - public void testCircle() throws IOException { + public void testCircle() { Circle circle = GeometryTestUtils.randomCircle(randomBoolean()); assertWKB(circle); } @@ -129,7 +128,7 @@ public void testEmptyRectangle() { assertEquals("Empty ENVELOPE cannot be represented in WKB", ex.getMessage()); } - public void testRectangle() throws IOException { + public void testRectangle() { Rectangle rectangle = GeometryTestUtils.randomRectangle(); assertWKB(rectangle); } @@ -138,7 +137,7 @@ private ByteOrder randomByteOrder() { return randomBoolean() ? 
ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN; } - private void assertWKB(Geometry geometry) throws IOException { + private void assertWKB(Geometry geometry) { final boolean hasZ = geometry.hasZ(); final ByteOrder byteOrder = randomByteOrder(); final byte[] b = WellKnownBinary.toWKB(geometry, byteOrder); diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 46860ff38b8ca..51cc7541a9a4d 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -583,7 +583,7 @@ public String strategy() { } @Override - protected void index(DocumentParserContext context, ShapeBuilder shapeBuilder) throws IOException { + protected void index(DocumentParserContext context, ShapeBuilder shapeBuilder) { if (shapeBuilder == null) { return; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 7ae410a1a9dcb..ad287e1c6b005 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -22,7 +22,6 @@ import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; -import java.io.IOException; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -191,7 +190,7 @@ public FieldMapper.Builder getMergeBuilder() { } @Override - protected void index(DocumentParserContext context, Geometry geometry) throws IOException { + protected void index(DocumentParserContext context, Geometry geometry) { if (geometry == null) { return; } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java index 55929a1c1b83e..13fb4246a5b3a 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapper.java @@ -340,7 +340,7 @@ public GeoShapeWithDocValuesFieldMapper( } @Override - protected void index(DocumentParserContext context, Geometry geometry) throws IOException { + protected void index(DocumentParserContext context, Geometry geometry) { // TODO: Make common with the index method ShapeFieldMapper if (geometry == null) { return; diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java index 378b78111ab19..f5cc7280aa8bb 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldMapper.java @@ -150,7 +150,7 @@ public PointFieldMapper( } @Override - protected void index(DocumentParserContext context, CartesianPoint point) throws IOException { + protected void index(DocumentParserContext context, CartesianPoint point) { if 
(fieldType().isIndexed()) { context.doc().add(new XYPointField(fieldType().name(), (float) point.getX(), (float) point.getY())); } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java index 127a4fd1050cd..838fd56cfc11a 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldMapper.java @@ -204,7 +204,7 @@ public ShapeFieldMapper( } @Override - protected void index(DocumentParserContext context, Geometry geometry) throws IOException { + protected void index(DocumentParserContext context, Geometry geometry) { // TODO: Make common with the index method GeoShapeWithDocValuesFieldMapper if (geometry == null) { return; From 14263a78e88c0197cbccf1c94aafda32898b7c30 Mon Sep 17 00:00:00 2001 From: William Brafford Date: Wed, 11 Oct 2023 08:49:18 -0400 Subject: [PATCH 156/176] Remove uses of Version from Plugin CLI commands (#100298) The Plugin CLI can generally treat strings opaquely. We had some logic comparing earlier versions, but what we really care about with most of our plugins is whether or not they were built with the current version of Elasticsearch, not whether they were built before or after. (This question will be trickier with stable plugins, but none of that code is in the CLI.) The CLI classes can be cleaned up even more once Version is removed from PluginDescriptor. Some of the tests can't use opaque strings for versions until PluginDescriptor can handle them. * Remove Version from Install and List plugin actions * Remove Version from SyncPluginsAction --- .../plugins/cli/InstallPluginAction.java | 37 +++++++++++------- .../plugins/cli/ListPluginsCommand.java | 12 ++++-- .../plugins/cli/SyncPluginsAction.java | 12 +++--- .../plugins/cli/InstallPluginActionTests.java | 39 ++++++++++++------- .../plugins/cli/ListPluginsCommandTests.java | 2 +- .../plugins/cli/SyncPluginsActionTests.java | 15 ++++--- 6 files changed, 74 insertions(+), 43 deletions(-) diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java index d32cbd8dd1736..c7bee4a6c172d 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java @@ -23,7 +23,6 @@ import org.bouncycastle.openpgp.operator.jcajce.JcaKeyFingerprintCalculator; import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentVerifierBuilderProvider; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.bootstrap.PluginPolicyInfo; import org.elasticsearch.bootstrap.PolicyUtil; import org.elasticsearch.cli.ExitCodes; @@ -84,6 +83,8 @@ import java.util.Set; import java.util.Timer; import java.util.TimerTask; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.zip.ZipEntry; @@ -303,7 +304,7 @@ private Path download(InstallablePlugin plugin, Path tmpDir) throws Exception { // else carry on to regular download } - final String url = getElasticUrl(getStagingHash(), Version.CURRENT, isSnapshot(), pluginId, 
Platforms.PLATFORM_NAME); + final String url = getElasticUrl(getStagingHash(), isSnapshot(), pluginId, Platforms.PLATFORM_NAME); terminal.println(logPrefix + "Downloading " + pluginId + " from elastic"); return downloadAndValidate(url, tmpDir, true); } @@ -341,7 +342,7 @@ private Path getPluginArchivePath(String pluginId, String pluginArchiveDir) thro if (Files.isDirectory(path) == false) { throw new UserException(ExitCodes.CONFIG, "Location in ES_PLUGIN_ARCHIVE_DIR is not a directory"); } - return PathUtils.get(pluginArchiveDir, pluginId + "-" + Version.CURRENT + (isSnapshot() ? "-SNAPSHOT" : "") + ".zip"); + return PathUtils.get(pluginArchiveDir, pluginId + "-" + Build.current().qualifiedVersion() + ".zip"); } // pkg private so tests can override @@ -356,13 +357,8 @@ boolean isSnapshot() { /** * Returns the url for an official elasticsearch plugin. */ - private String getElasticUrl( - final String stagingHash, - final Version version, - final boolean isSnapshot, - final String pluginId, - final String platform - ) throws IOException, UserException { + private String getElasticUrl(final String stagingHash, final boolean isSnapshot, final String pluginId, final String platform) + throws IOException, UserException { final String baseUrl; if (isSnapshot && stagingHash == null) { throw new UserException( @@ -370,11 +366,21 @@ private String getElasticUrl( "attempted to install release build of official plugin on snapshot build of Elasticsearch" ); } + // assumption: we will only be publishing plugins to snapshot or staging when they're versioned + String semanticVersion = getSemanticVersion(Build.current().version()); + if (semanticVersion == null) { + throw new UserException( + ExitCodes.CONFIG, + "attempted to download a plugin for a non-semantically-versioned build of Elasticsearch: [" + + Build.current().version() + + "]" + ); + } if (stagingHash != null) { if (isSnapshot) { - baseUrl = nonReleaseUrl("snapshots", version, stagingHash, pluginId); + baseUrl = nonReleaseUrl("snapshots", semanticVersion, stagingHash, pluginId); } else { - baseUrl = nonReleaseUrl("staging", version, stagingHash, pluginId); + baseUrl = nonReleaseUrl("staging", semanticVersion, stagingHash, pluginId); } } else { baseUrl = String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId); @@ -393,7 +399,7 @@ private String getElasticUrl( return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Build.current().qualifiedVersion()); } - private static String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) { + private static String nonReleaseUrl(final String hostname, final String version, final String stagingHash, final String pluginId) { return String.format( Locale.ROOT, "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", @@ -1088,4 +1094,9 @@ private static void setFileAttributes(final Path path, final Set getPluginsToUpgrade( throw new RuntimeException("Couldn't find a PluginInfo for [" + eachPluginId + "], which should be impossible"); }); - if (info.getElasticsearchVersion().before(Version.CURRENT)) { + if (info.getElasticsearchVersion().toString().equals(Build.current().version()) == false) { this.terminal.println( Terminal.Verbosity.VERBOSE, String.format( Locale.ROOT, - "Official plugin [%s] is out-of-date (%s versus %s), upgrading", + "Official plugin [%s] is out-of-sync (%s versus %s), upgrading", eachPluginId, info.getElasticsearchVersion(), - Version.CURRENT + 
Build.current().version() ) ); return true; @@ -278,14 +278,14 @@ private List getExistingPlugins() throws PluginSyncException { // Check for a version mismatch, unless it's an official plugin since we can upgrade them. if (InstallPluginAction.OFFICIAL_PLUGINS.contains(info.getName()) - && info.getElasticsearchVersion().equals(Version.CURRENT) == false) { + && info.getElasticsearchVersion().toString().equals(Build.current().version()) == false) { this.terminal.errorPrintln( String.format( Locale.ROOT, "WARNING: plugin [%s] was built for Elasticsearch version %s but version %s is required", info.getName(), info.getElasticsearchVersion(), - Version.CURRENT + Build.current().version() ) ); } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index 2a66ed3cf4349..2da05d87f831f 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -32,7 +32,6 @@ import org.bouncycastle.openpgp.operator.jcajce.JcePBESecretKeyDecryptorBuilder; import org.bouncycastle.openpgp.operator.jcajce.JcePBESecretKeyEncryptorBuilder; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.ProcessInfo; @@ -111,6 +110,7 @@ import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -298,7 +298,7 @@ private static String[] pluginProperties(String name, String[] additionalProps, "version", "1.0", "elasticsearch.version", - Version.CURRENT.toString(), + InstallPluginAction.getSemanticVersion(Build.current().version()), "java.version", System.getProperty("java.specification.version") @@ -724,7 +724,7 @@ public void testPluginPermissions() throws Exception { final Path platformBinDir = platformNameDir.resolve("bin"); Files.createDirectories(platformBinDir); - Files.createFile(tempPluginDir.resolve("fake-" + Version.CURRENT.toString() + ".jar")); + Files.createFile(tempPluginDir.resolve("fake-" + Build.current().version() + ".jar")); Files.createFile(platformBinDir.resolve("fake_executable")); Files.createDirectory(resourcesDir); Files.createFile(resourcesDir.resolve("resource")); @@ -740,7 +740,7 @@ public void testPluginPermissions() throws Exception { final Path platformName = platform.resolve("linux-x86_64"); final Path bin = platformName.resolve("bin"); assert755(fake); - assert644(fake.resolve("fake-" + Version.CURRENT + ".jar")); + assert644(fake.resolve("fake-" + Build.current().version() + ".jar")); assert755(resources); assert644(resources.resolve("resource")); assert755(platform); @@ -1110,8 +1110,8 @@ public void testOfficialPluginSnapshot() throws Exception { String url = String.format( Locale.ROOT, "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip", - Version.CURRENT, - Build.current().qualifiedVersion() + InstallPluginAction.getSemanticVersion(Build.current().version()), + Build.current().version() ); 
assertInstallPluginFromUrl("analysis-icu", url, "abc123", true); } @@ -1120,8 +1120,8 @@ public void testInstallReleaseBuildOfPluginOnSnapshotBuild() { String url = String.format( Locale.ROOT, "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip", - Version.CURRENT, - Build.current().qualifiedVersion() + InstallPluginAction.getSemanticVersion(Build.current().version()), + Build.current().version() ); // attempting to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception final UserException e = expectThrows( @@ -1137,9 +1137,9 @@ public void testInstallReleaseBuildOfPluginOnSnapshotBuild() { public void testOfficialPluginStaging() throws Exception { String url = "https://staging.elastic.co/" - + Version.CURRENT + + InstallPluginAction.getSemanticVersion(Build.current().version()) + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Build.current().qualifiedVersion() + + Build.current().version() + ".zip"; assertInstallPluginFromUrl("analysis-icu", url, "abc123", false); } @@ -1148,7 +1148,7 @@ public void testOfficialPlatformPlugin() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-" - + Build.current().qualifiedVersion() + + Build.current().version() + ".zip"; assertInstallPluginFromUrl("analysis-icu", url, null, false); } @@ -1157,16 +1157,16 @@ public void testOfficialPlatformPluginSnapshot() throws Exception { String url = String.format( Locale.ROOT, "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s-%s.zip", - Version.CURRENT, + InstallPluginAction.getSemanticVersion(Build.current().version()), Platforms.PLATFORM_NAME, - Build.current().qualifiedVersion() + Build.current().version() ); assertInstallPluginFromUrl("analysis-icu", url, "abc123", true); } public void testOfficialPlatformPluginStaging() throws Exception { String url = "https://staging.elastic.co/" - + Version.CURRENT + + InstallPluginAction.getSemanticVersion(Build.current().version()) + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + "-" @@ -1580,6 +1580,17 @@ public void testStablePluginWithoutNamedComponentsFile() throws Exception { assertNamedComponentFile("stable1", env.v2().pluginsFile(), namedComponentsJSON()); } + public void testGetSemanticVersion() { + assertThat(InstallPluginAction.getSemanticVersion("1.2.3"), equalTo("1.2.3")); + assertThat(InstallPluginAction.getSemanticVersion("123.456.789"), equalTo("123.456.789")); + assertThat(InstallPluginAction.getSemanticVersion("1.2.3-SNAPSHOT"), equalTo("1.2.3")); + assertThat(InstallPluginAction.getSemanticVersion("1.2.3foobar"), equalTo("1.2.3")); + assertThat(InstallPluginAction.getSemanticVersion("1.2.3.4"), equalTo("1.2.3")); + assertThat(InstallPluginAction.getSemanticVersion("1.2"), nullValue()); + assertThat(InstallPluginAction.getSemanticVersion("foo"), nullValue()); + assertThat(InstallPluginAction.getSemanticVersion("foo-1.2.3"), nullValue()); + } + private Map> namedComponentsMap() { Map> result = new LinkedHashMap<>(); Map extensibles = new LinkedHashMap<>(); diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java index e1577f7d101be..b225bc441794a 100644 --- 
a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/ListPluginsCommandTests.java @@ -215,7 +215,7 @@ public void testExistingIncompatiblePlugin() throws Exception { "version", "1.0", "elasticsearch.version", - Version.fromString("1.0.0").toString(), + "1.0.0", "java.version", System.getProperty("java.specification.version"), "classname", diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java index 2c200df2a7d56..9802b4039bb7b 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/SyncPluginsActionTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins.cli; import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.Version; +import org.elasticsearch.Build; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.Settings; @@ -17,6 +17,7 @@ import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.plugins.cli.SyncPluginsAction.PluginChanges; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.hamcrest.Matchers; import org.junit.Before; import org.mockito.InOrder; @@ -26,6 +27,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.List; +import java.util.Objects; import java.util.Optional; import static org.hamcrest.Matchers.containsString; @@ -129,7 +131,7 @@ public void test_getPluginChanges_withPluginToInstall_returnsPluginToInstall() t * since we can't automatically upgrade it. */ public void test_getPluginChanges_withPluginToUpgrade_returnsNoChanges() throws Exception { - createPlugin("my-plugin", Version.CURRENT.previousMajor()); + createPlugin("my-plugin", VersionUtils.getPreviousVersion().toString()); config.setPlugins(List.of(new InstallablePlugin("my-plugin"))); final PluginChanges pluginChanges = action.getPluginChanges(config, Optional.empty()); @@ -142,7 +144,7 @@ public void test_getPluginChanges_withPluginToUpgrade_returnsNoChanges() throws * but needs to be upgraded, then we calculate that the plugin needs to be upgraded. */ public void test_getPluginChanges_withOfficialPluginToUpgrade_returnsPluginToUpgrade() throws Exception { - createPlugin("analysis-icu", Version.CURRENT.previousMajor()); + createPlugin("analysis-icu", VersionUtils.getPreviousVersion().toString()); config.setPlugins(List.of(new InstallablePlugin("analysis-icu"))); final PluginChanges pluginChanges = action.getPluginChanges(config, Optional.empty()); @@ -329,10 +331,11 @@ public void test_performSync_withPluginsToUpgrade_callsUpgradeAction() throws Ex } private void createPlugin(String name) throws IOException { - createPlugin(name, Version.CURRENT); + String semanticVersion = InstallPluginAction.getSemanticVersion(Build.current().version()); + createPlugin(name, Objects.nonNull(semanticVersion) ? 
semanticVersion : Build.current().version()); } - private void createPlugin(String name, Version version) throws IOException { + private void createPlugin(String name, String version) throws IOException { PluginTestUtil.writePluginProperties( env.pluginsFile().resolve(name), "description", @@ -342,7 +345,7 @@ private void createPlugin(String name, Version version) throws IOException { "version", "1.0", "elasticsearch.version", - version.toString(), + version, "java.version", System.getProperty("java.specification.version"), "classname", From 43f4ff3034f8118a2c1b15fea90320a8203fbcb0 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 11 Oct 2023 14:21:12 +0100 Subject: [PATCH 157/176] Encapsulate snapshot repo cleanup (#100657) Repository cleanup is basically the same as deleting an empty set of snapshots, and in particular it needs the same context as a snapshots deletion. This commit moves the cleanup process and its dependent methods within `SnapshotsDeletion` and removes a great deal of unnecessary argument-passing and other duplication. Relates #100568 --- .../TransportCleanupRepositoryAction.java | 2 + .../blobstore/BlobStoreRepository.java | 371 +++++++++--------- 2 files changed, 180 insertions(+), 193 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index bd9382aeaa758..1a626fe4dce31 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -213,6 +213,8 @@ public void onFailure(Exception e) { public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { startedCleanup = true; logger.debug("Initialized repository cleanup in cluster state for [{}][{}]", repositoryName, repositoryStateId); + // We fork here just to call SnapshotsService#minCompatibleVersion (which may be to expensive to run directly) but + // BlobStoreRepository#cleanup forks again straight away. TODO reduce the forking here. threadPool.executor(ThreadPool.Names.SNAPSHOT) .execute( ActionRunnable.wrap( diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 1e2969b255877..39d11e9d9a4f3 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -849,6 +849,33 @@ public void onFailure(Exception e) { }); } + /** + * Runs cleanup actions on the repository. Increments the repository state id by one before executing any modifications on the + * repository. + * TODO: Add shard level cleanups + * TODO: Add unreferenced index metadata cleanup + *
<ul>
+ * <li>Deleting stale indices</li>
+ * <li>Deleting unreferenced root level blobs</li>
+ * </ul>
    + * + * @param repositoryDataGeneration Generation of {@link RepositoryData} at start of process + * @param repositoryFormatIndexVersion Repository format version + * @param listener Listener to complete when done + */ + public void cleanup( + long repositoryDataGeneration, + IndexVersion repositoryFormatIndexVersion, + ActionListener listener + ) { + createSnapshotsDeletion( + List.of(), + repositoryDataGeneration, + repositoryFormatIndexVersion, + listener.delegateFailureAndWrap((delegate, snapshotsDeletion) -> snapshotsDeletion.runCleanup(delegate)) + ); + } + private void createSnapshotsDeletion( Collection snapshotIds, long repositoryDataGeneration, @@ -890,7 +917,7 @@ private void createSnapshotsDeletion( class SnapshotsDeletion { /** - * The IDs of the snapshots to delete. + * The IDs of the snapshots to delete. This collection is empty if the deletion is a repository cleanup. */ private final Collection snapshotIds; @@ -976,7 +1003,7 @@ private record ShardSnapshotMetaDeleteResult( // --------------------------------------------------------------------------------------------------------------------------------- // The overall flow of execution - private void runDelete(SnapshotDeleteListener listener) { + void runDelete(SnapshotDeleteListener listener) { if (useShardGenerations) { // First write the new shard state metadata (with the removed snapshot) and compute deletion targets final ListenableFuture> writeShardMetaDataAndComputeDeletesStep = @@ -1009,7 +1036,7 @@ private void runDelete(SnapshotDeleteListener listener) { listener.onRepositoryDataWritten(newRepositoryData); // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion try (var refs = new RefCountingRunnable(listener::onDone)) { - cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); + cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener().map(ignored -> null)); cleanupUnlinkedShardLevelBlobs(writeShardMetaDataAndComputeDeletesStep.result(), refs.acquireListener()); } }, listener::onFailure)); @@ -1026,7 +1053,7 @@ private void runDelete(SnapshotDeleteListener listener) { listener.onDone(); })) { // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion - cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); + cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener().map(ignored -> null)); // writeIndexGen finishes on master-service thread so must fork here. snapshotExecutor.execute( @@ -1043,6 +1070,34 @@ private void runDelete(SnapshotDeleteListener listener) { } } + void runCleanup(ActionListener listener) { + final Set survivingIndexIds = originalRepositoryData.getIndices() + .values() + .stream() + .map(IndexId::getId) + .collect(Collectors.toSet()); + final List staleRootBlobs = staleRootBlobs(originalRepositoryData, originalRootBlobs.keySet()); + if (survivingIndexIds.equals(originalIndexContainers.keySet()) && staleRootBlobs.isEmpty()) { + // Nothing to clean up we return + listener.onResponse(new RepositoryCleanupResult(DeleteResult.ZERO)); + } else { + // write new index-N blob to ensure concurrent operations will fail + writeIndexGen( + originalRepositoryData, + originalRepositoryDataGeneration, + repositoryFormatIndexVersion, + Function.identity(), + listener.delegateFailureAndWrap( + // TODO should we pass newRepositoryData to cleanupStaleBlobs()? 
+ (l, newRepositoryData) -> cleanupUnlinkedRootAndIndicesBlobs( + originalRepositoryData, + l.map(RepositoryCleanupResult::new) + ) + ) + ); + } + } + // --------------------------------------------------------------------------------------------------------------------------------- // Updating the shard-level metadata and accumulating results @@ -1251,14 +1306,6 @@ private static List unusedBlobs( // --------------------------------------------------------------------------------------------------------------------------------- // Cleaning up dangling blobs - /** - * Delete any dangling blobs in the repository root (i.e. {@link RepositoryData}, {@link SnapshotInfo} and {@link Metadata} blobs) - * as well as any containers for indices that are now completely unreferenced. - */ - private void cleanupUnlinkedRootAndIndicesBlobs(RepositoryData newRepositoryData, ActionListener listener) { - cleanupStaleBlobs(snapshotIds, originalIndexContainers, originalRootBlobs, newRepositoryData, listener.map(ignored -> null)); - } - private void cleanupUnlinkedShardLevelBlobs( Collection shardDeleteResults, ActionListener listener @@ -1295,201 +1342,139 @@ private Iterator resolveFilesToDelete(Collection snapshotIds, - Map originalIndexContainers, - Map originalRootBlobs, - RepositoryData newRepositoryData, - ActionListener listener - ) { - final var blobsDeleted = new AtomicLong(); - final var bytesDeleted = new AtomicLong(); - try (var listeners = new RefCountingListener(listener.map(ignored -> DeleteResult.of(blobsDeleted.get(), bytesDeleted.get())))) { - - final List staleRootBlobs = staleRootBlobs(newRepositoryData, originalRootBlobs.keySet()); - if (staleRootBlobs.isEmpty() == false) { - staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { - try (ref) { - logStaleRootLevelBlobs(newRepositoryData.getGenId() - 1, snapshotIds, staleRootBlobs); - deleteFromContainer(blobContainer(), staleRootBlobs.iterator()); - for (final var staleRootBlob : staleRootBlobs) { - bytesDeleted.addAndGet(originalRootBlobs.get(staleRootBlob).length()); - } - blobsDeleted.addAndGet(staleRootBlobs.size()); - } catch (Exception e) { - logger.warn( - () -> format( - "[%s] The following blobs are no longer part of any snapshot [%s] but failed to remove them", - metadata.name(), - staleRootBlobs - ), - e - ); - } - })); - } + /** + * Cleans up stale blobs directly under the repository root as well as all indices paths that aren't referenced by any existing + * snapshots. This method is only to be called directly after a new {@link RepositoryData} was written to the repository. 
+ * + * @param newRepositoryData new repository data that was just written + * @param listener listener to invoke with the combined {@link DeleteResult} of all blobs removed in this operation + */ + private void cleanupUnlinkedRootAndIndicesBlobs(RepositoryData newRepositoryData, ActionListener listener) { + final var blobsDeleted = new AtomicLong(); + final var bytesDeleted = new AtomicLong(); + try ( + var listeners = new RefCountingListener(listener.map(ignored -> DeleteResult.of(blobsDeleted.get(), bytesDeleted.get()))) + ) { - final var survivingIndexIds = newRepositoryData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); - for (final var indexEntry : originalIndexContainers.entrySet()) { - final var indexId = indexEntry.getKey(); - if (survivingIndexIds.contains(indexId)) { - continue; + final List staleRootBlobs = staleRootBlobs(newRepositoryData, originalRootBlobs.keySet()); + if (staleRootBlobs.isEmpty() == false) { + staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { + try (ref) { + logStaleRootLevelBlobs(newRepositoryData.getGenId() - 1, snapshotIds, staleRootBlobs); + deleteFromContainer(blobContainer(), staleRootBlobs.iterator()); + for (final var staleRootBlob : staleRootBlobs) { + bytesDeleted.addAndGet(originalRootBlobs.get(staleRootBlob).length()); + } + blobsDeleted.addAndGet(staleRootBlobs.size()); + } catch (Exception e) { + logger.warn( + () -> format( + "[%s] The following blobs are no longer part of any snapshot [%s] but failed to remove them", + metadata.name(), + staleRootBlobs + ), + e + ); + } + })); } - staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { - try (ref) { - logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexId); - final var deleteResult = indexEntry.getValue().delete(OperationPurpose.SNAPSHOT); - blobsDeleted.addAndGet(deleteResult.blobsDeleted()); - bytesDeleted.addAndGet(deleteResult.bytesDeleted()); - logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexId); - } catch (IOException e) { - logger.warn(() -> format(""" - [%s] index %s is no longer part of any snapshot in the repository, \ - but failed to clean up its index folder""", metadata.name(), indexId), e); + + final var survivingIndexIds = newRepositoryData.getIndices() + .values() + .stream() + .map(IndexId::getId) + .collect(Collectors.toSet()); + for (final var indexEntry : originalIndexContainers.entrySet()) { + final var indexId = indexEntry.getKey(); + if (survivingIndexIds.contains(indexId)) { + continue; } - })); + staleBlobDeleteRunner.enqueueTask(listeners.acquire(ref -> { + try (ref) { + logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexId); + final var deleteResult = indexEntry.getValue().delete(OperationPurpose.SNAPSHOT); + blobsDeleted.addAndGet(deleteResult.blobsDeleted()); + bytesDeleted.addAndGet(deleteResult.bytesDeleted()); + logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexId); + } catch (IOException e) { + logger.warn(() -> format(""" + [%s] index %s is no longer part of any snapshot in the repository, \ + but failed to clean up its index folder""", metadata.name(), indexId), e); + } + })); + } } - } - // If we did the cleanup of stale indices purely using a throttled executor then there would be no backpressure to prevent us from - // falling arbitrarily far behind. 
But nor do we want to dedicate all the SNAPSHOT threads to stale index cleanups because that - // would slow down other snapshot operations in situations that do not need backpressure. - // - // The solution is to dedicate one SNAPSHOT thread to doing the cleanups eagerly, alongside the throttled executor which spreads - // the rest of the work across the other threads if they are free. If the eager cleanup loop doesn't finish before the next one - // starts then we dedicate another SNAPSHOT thread to the deletions, and so on, until eventually either we catch up or the SNAPSHOT - // pool is fully occupied with blob deletions, which pushes back on other snapshot operations. + // If we did the cleanup of stale indices purely using a throttled executor then there would be no backpressure to prevent us + // from falling arbitrarily far behind. But nor do we want to dedicate all the SNAPSHOT threads to stale index cleanups because + // that would slow down other snapshot operations in situations that do not need backpressure. + // + // The solution is to dedicate one SNAPSHOT thread to doing the cleanups eagerly, alongside the throttled executor which spreads + // the rest of the work across the other threads if they are free. If the eager cleanup loop doesn't finish before the next one + // starts then we dedicate another SNAPSHOT thread to the deletions, and so on, until eventually either we catch up or the + // SNAPSHOT pool is fully occupied with blob deletions, which pushes back on other snapshot operations. - staleBlobDeleteRunner.runSyncTasksEagerly(threadPool.executor(ThreadPool.Names.SNAPSHOT)); - } + staleBlobDeleteRunner.runSyncTasksEagerly(snapshotExecutor); + } - /** - * Runs cleanup actions on the repository. Increments the repository state id by one before executing any modifications on the - * repository. - * TODO: Add shard level cleanups - * TODO: Add unreferenced index metadata cleanup - *
<ul>
- * <li>Deleting stale indices</li>
- * <li>Deleting unreferenced root level blobs</li>
- * </ul>
    - * @param originalRepositoryDataGeneration Current repository state id - * @param repositoryFormatIndexVersion version of the updated repository metadata to write - * @param listener Listener to complete when done - */ - public void cleanup( - long originalRepositoryDataGeneration, - IndexVersion repositoryFormatIndexVersion, - ActionListener listener - ) { - try { - if (isReadOnly()) { - throw new RepositoryException(metadata.name(), "cannot run cleanup on readonly repository"); - } - Map originalRootBlobs = blobContainer().listBlobs(OperationPurpose.SNAPSHOT); - final RepositoryData originalRepositoryData = safeRepositoryData(originalRepositoryDataGeneration, originalRootBlobs); - final Map originalIndexContainers = blobStore().blobContainer(indicesPath()) - .children(OperationPurpose.SNAPSHOT); - final Set survivingIndexIds = originalRepositoryData.getIndices() - .values() + // Finds all blobs directly under the repository root path that are not referenced by the current RepositoryData + private static List staleRootBlobs(RepositoryData newRepositoryData, Set originalRootBlobNames) { + final Set allSnapshotIds = newRepositoryData.getSnapshotIds() .stream() - .map(IndexId::getId) + .map(SnapshotId::getUUID) .collect(Collectors.toSet()); - final List staleRootBlobs = staleRootBlobs(originalRepositoryData, originalRootBlobs.keySet()); - if (survivingIndexIds.equals(originalIndexContainers.keySet()) && staleRootBlobs.isEmpty()) { - // Nothing to clean up we return - listener.onResponse(new RepositoryCleanupResult(DeleteResult.ZERO)); - } else { - // write new index-N blob to ensure concurrent operations will fail - writeIndexGen( - originalRepositoryData, - originalRepositoryDataGeneration, - repositoryFormatIndexVersion, - Function.identity(), - listener.delegateFailureAndWrap( - (l, v) -> cleanupStaleBlobs( - Collections.emptyList(), - originalIndexContainers, - originalRootBlobs, - originalRepositoryData, - l.map(RepositoryCleanupResult::new) - ) - ) - ); - } - } catch (Exception e) { - listener.onFailure(e); - } - } - - // Finds all blobs directly under the repository root path that are not referenced by the current RepositoryData - private static List staleRootBlobs(RepositoryData originalRepositoryData, Set originalRootBlobNames) { - final Set allSnapshotIds = originalRepositoryData.getSnapshotIds() - .stream() - .map(SnapshotId::getUUID) - .collect(Collectors.toSet()); - return originalRootBlobNames.stream().filter(blob -> { - if (FsBlobContainer.isTempBlobName(blob)) { - return true; - } - if (blob.endsWith(".dat")) { - final String foundUUID; - if (blob.startsWith(SNAPSHOT_PREFIX)) { - foundUUID = blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()); - assert SNAPSHOT_FORMAT.blobName(foundUUID).equals(blob); - } else if (blob.startsWith(METADATA_PREFIX)) { - foundUUID = blob.substring(METADATA_PREFIX.length(), blob.length() - ".dat".length()); - assert GLOBAL_METADATA_FORMAT.blobName(foundUUID).equals(blob); - } else { - return false; + return originalRootBlobNames.stream().filter(blob -> { + if (FsBlobContainer.isTempBlobName(blob)) { + return true; } - return allSnapshotIds.contains(foundUUID) == false; - } else if (blob.startsWith(INDEX_FILE_PREFIX)) { - // TODO: Include the current generation here once we remove keeping index-(N-1) around from #writeIndexGen - try { - return originalRepositoryData.getGenId() > Long.parseLong(blob.substring(INDEX_FILE_PREFIX.length())); - } catch (NumberFormatException nfe) { - // odd case of an extra file with the 
index- prefix that we can't identify - return false; + if (blob.endsWith(".dat")) { + final String foundUUID; + if (blob.startsWith(SNAPSHOT_PREFIX)) { + foundUUID = blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length()); + assert SNAPSHOT_FORMAT.blobName(foundUUID).equals(blob); + } else if (blob.startsWith(METADATA_PREFIX)) { + foundUUID = blob.substring(METADATA_PREFIX.length(), blob.length() - ".dat".length()); + assert GLOBAL_METADATA_FORMAT.blobName(foundUUID).equals(blob); + } else { + return false; + } + return allSnapshotIds.contains(foundUUID) == false; + } else if (blob.startsWith(INDEX_FILE_PREFIX)) { + // TODO: Include the current generation here once we remove keeping index-(N-1) around from #writeIndexGen + try { + return newRepositoryData.getGenId() > Long.parseLong(blob.substring(INDEX_FILE_PREFIX.length())); + } catch (NumberFormatException nfe) { + // odd case of an extra file with the index- prefix that we can't identify + return false; + } } - } - return false; - }).toList(); - } + return false; + }).toList(); + } - private void logStaleRootLevelBlobs( - long originalRepositoryDataGeneration, - Collection snapshotIds, - List blobsToDelete - ) { - if (logger.isInfoEnabled()) { - // If we're running root level cleanup as part of a snapshot delete we should not log the snapshot- and global metadata - // blobs associated with the just deleted snapshots as they are expected to exist and not stale. Otherwise every snapshot - // delete would also log a confusing INFO message about "stale blobs". - final Set blobNamesToIgnore = snapshotIds.stream() - .flatMap( - snapshotId -> Stream.of( - GLOBAL_METADATA_FORMAT.blobName(snapshotId.getUUID()), - SNAPSHOT_FORMAT.blobName(snapshotId.getUUID()), - INDEX_FILE_PREFIX + originalRepositoryDataGeneration + private void logStaleRootLevelBlobs( + long newestStaleRepositoryDataGeneration, + Collection snapshotIds, + List blobsToDelete + ) { + if (logger.isInfoEnabled()) { + // If we're running root level cleanup as part of a snapshot delete we should not log the snapshot- and global metadata + // blobs associated with the just deleted snapshots as they are expected to exist and not stale. Otherwise every snapshot + // delete would also log a confusing INFO message about "stale blobs". + final Set blobNamesToIgnore = snapshotIds.stream() + .flatMap( + snapshotId -> Stream.of( + GLOBAL_METADATA_FORMAT.blobName(snapshotId.getUUID()), + SNAPSHOT_FORMAT.blobName(snapshotId.getUUID()), + INDEX_FILE_PREFIX + newestStaleRepositoryDataGeneration + ) ) - ) - .collect(Collectors.toSet()); - final List blobsToLog = blobsToDelete.stream().filter(b -> blobNamesToIgnore.contains(b) == false).toList(); - if (blobsToLog.isEmpty() == false) { - logger.info("[{}] Found stale root level blobs {}. Cleaning them up", metadata.name(), blobsToLog); + .collect(Collectors.toSet()); + final List blobsToLog = blobsToDelete.stream().filter(b -> blobNamesToIgnore.contains(b) == false).toList(); + if (blobsToLog.isEmpty() == false) { + logger.info("[{}] Found stale root level blobs {}. Cleaning them up", metadata.name(), blobsToLog); + } } } } From 64047234dcb6f8e22a0094e3d6025328077ed5b1 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 11 Oct 2023 06:37:03 -0700 Subject: [PATCH 158/176] Updating testing and logging for dense_vector dynamic dims (#100546) This adds a test for dynamic dims update mapping merges. Also, this adds logging to help investigate a periodically failing test. 
related to: https://github.com/elastic/elasticsearch/issues/100502 --- .../60_dense_vector_dynamic_mapping.yml | 24 +++++++++++++++++++ .../vectors/DenseVectorFieldMapperTests.java | 22 +++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml index d2c02fcbff38e..4ef700f807c13 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml @@ -3,6 +3,30 @@ setup: version: ' - 8.10.99' reason: 'Dynamic mapping of floats to dense_vector was added in 8.11' + # Additional logging for issue: https://github.com/elastic/elasticsearch/issues/100502 + - do: + cluster.put_settings: + body: > + { + "persistent": { + "logger.org.elasticsearch.index": "TRACE" + } + } + +--- +teardown: + - skip: + version: ' - 8.10.99' + reason: 'Dynamic mapping of floats to dense_vector was added in 8.11' + + - do: + cluster.put_settings: + body: > + { + "persistent": { + "logger.org.elasticsearch.index": null + } + } --- "Fields with float arrays below the threshold still map as float": diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 2899dab6ff303..183c0083c7da1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -20,7 +20,9 @@ import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.codec.PerFieldMapperCodec; @@ -231,6 +233,26 @@ public void testDims() { } } + public void testMergeDims() throws IOException { + XContentBuilder mapping = mapping(b -> { + b.startObject("field"); + b.field("type", "dense_vector"); + b.endObject(); + }); + MapperService mapperService = createMapperService(mapping); + + mapping = mapping(b -> { + b.startObject("field"); + b.field("type", "dense_vector").field("dims", 4).field("similarity", "cosine").field("index", true); + b.endObject(); + }); + merge(mapperService, mapping); + assertEquals( + XContentHelper.convertToMap(BytesReference.bytes(mapping), false, mapping.contentType()).v2(), + XContentHelper.convertToMap(mapperService.documentMapper().mappingSource().uncompressed(), false, mapping.contentType()).v2() + ); + } + public void testDefaults() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "dense_vector").field("dims", 3))); From e411b57baf5f19159fa6de8766a2fe5718800513 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 11 Oct 2023 06:56:43 -0700 Subject: [PATCH 159/176] Harden discard logic in ExchangeBuffer (#100636) We can leave pages in the ExchangeBuffer if the noMoreInputs flag is set to true after we've checked it but before we add pages to the queue. 
I can reliably reproduce the testFromLimit by inserting a delay in between. This change hardens the discard logic by moving the check after we've added a Page to the queue. If the noMoreInputs flag is set to true, we will drain the pages from the queue. --- .../operator/exchange/ExchangeBuffer.java | 23 +++-- .../exchange/ExchangeBufferTests.java | 93 +++++++++++++++++++ .../xpack/esql/action/EsqlActionIT.java | 1 - 3 files changed, 106 insertions(+), 11 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeBufferTests.java diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java index 930ced04636f8..df6c09ea1ff97 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeBuffer.java @@ -41,13 +41,12 @@ final class ExchangeBuffer { } void addPage(Page page) { + queue.add(page); + if (queueSize.incrementAndGet() == 1) { + notifyNotEmpty(); + } if (noMoreInputs) { - page.releaseBlocks(); - } else { - queue.add(page); - if (queueSize.incrementAndGet() == 1) { - notifyNotEmpty(); - } + discardPages(); } } @@ -115,13 +114,17 @@ SubscribableListener waitForReading() { } } + private void discardPages() { + Page p; + while ((p = pollPage()) != null) { + p.releaseBlocks(); + } + } + void finish(boolean drainingPages) { noMoreInputs = true; if (drainingPages) { - Page p; - while ((p = pollPage()) != null) { - p.releaseBlocks(); - } + discardPages(); } notifyNotEmpty(); if (drainingPages || queueSize.get() == 0) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeBufferTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeBufferTests.java new file mode 100644 index 0000000000000..4c975c6c07834 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeBufferTests.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator.exchange; + +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.BasicBlockTests; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.MockBlockFactory; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.equalTo; + +public class ExchangeBufferTests extends ESTestCase { + + public void testDrainPages() throws Exception { + ExchangeBuffer buffer = new ExchangeBuffer(randomIntBetween(10, 1000)); + var blockFactory = blockFactory(); + CountDownLatch latch = new CountDownLatch(1); + Thread[] producers = new Thread[between(1, 4)]; + AtomicBoolean stopped = new AtomicBoolean(); + AtomicInteger addedPages = new AtomicInteger(); + for (int t = 0; t < producers.length; t++) { + producers[t] = new Thread(() -> { + try { + latch.await(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + while (stopped.get() == false && addedPages.incrementAndGet() < 10_000) { + buffer.addPage(randomPage(blockFactory)); + } + }); + producers[t].start(); + } + latch.countDown(); + try { + int minPage = between(10, 100); + int receivedPage = 0; + while (receivedPage < minPage) { + Page p = buffer.pollPage(); + if (p != null) { + p.releaseBlocks(); + ++receivedPage; + } + } + } finally { + buffer.finish(true); + stopped.set(true); + } + for (Thread t : producers) { + t.join(); + } + assertThat(buffer.size(), equalTo(0)); + blockFactory.ensureAllBlocksAreReleased(); + } + + private static MockBlockFactory blockFactory() { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + return new MockBlockFactory(breaker, bigArrays); + } + + private static Page randomPage(BlockFactory blockFactory) { + Block block = BasicBlockTests.randomBlock( + blockFactory, + randomFrom(ElementType.LONG, ElementType.BYTES_REF, ElementType.BOOLEAN), + randomIntBetween(1, 100), + randomBoolean(), + 0, + between(1, 2), + 0, + between(1, 2) + ).block(); + return new Page(block); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 0017a8600a013..2712ef8d2f59b 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -862,7 +862,6 @@ public void testFromStatsLimit() { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99826") public void testFromLimit() { try (EsqlQueryResponse results = run("from test | keep data | limit 2")) { logger.info(results); From 
18c5246f1aa4bc3a08b2bade2cb67ef3b90e649c Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 11 Oct 2023 06:56:54 -0700 Subject: [PATCH 160/176] Disallow vectors whose magnitudes will not fit in a float (#100519) While we check that a magnitude is not `0f`, we don't verify that it actually fits within a `float` value. This commit returns a failure and rejects `float` vectors whose magnitudes don't fit within a 32-bit `float` value. We don't support `float64` (aka `double`) values for vector search and should fail when a user attempts to index a vector that requires storing it as a `double`. closes: https://github.com/elastic/elasticsearch/issues/100471 --- docs/changelog/100519.yaml | 5 +++ .../vectors/DenseVectorFieldMapper.java | 13 ++++++- .../vectors/DenseVectorFieldMapperTests.java | 34 +++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/100519.yaml diff --git a/docs/changelog/100519.yaml b/docs/changelog/100519.yaml new file mode 100644 index 0000000000000..086c6962b3a95 --- /dev/null +++ b/docs/changelog/100519.yaml @@ -0,0 +1,5 @@ +pr: 100519 +summary: Disallow vectors whose magnitudes will not fit in a float +area: Vector Search +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index deb178ff724bb..ee144b25f4507 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -458,6 +458,15 @@ void checkVectorMagnitude( ) { StringBuilder errorBuilder = null; + if (Float.isNaN(squaredMagnitude) || Float.isInfinite(squaredMagnitude)) { + errorBuilder = new StringBuilder( + "NaN or Infinite magnitude detected, this usually means the vector values are too extreme to fit within a float." + ); + } + if (errorBuilder != null) { + throw new IllegalArgumentException(appender.apply(errorBuilder).toString()); + } + if (similarity == VectorSimilarity.DOT_PRODUCT && Math.abs(squaredMagnitude - 1.0f) > 1e-4f) { errorBuilder = new StringBuilder( "The [" + VectorSimilarity.DOT_PRODUCT + "] similarity can only be used with unit-length vectors."
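For context, the overflow this guard catches happens when the dot product squares the vector components: a large but finite value such as -1.7255947E30f squares well past Float.MAX_VALUE (about 3.4e38), so the accumulated squared magnitude becomes Infinity even though every input value is finite. A minimal standalone sketch of that behavior, using the two vectors from the tests in this patch (illustration only, not the mapper code; isMagnitudeStorableAsFloat is a hypothetical helper mirroring the new guard):

    // Squaring a finite float component can exceed Float.MAX_VALUE, turning the
    // accumulated squared magnitude into Float.POSITIVE_INFINITY (or NaN if the
    // input already contains NaN), which is what the new guard detects.
    static boolean isMagnitudeStorableAsFloat(float[] vector) {
        float squaredMagnitude = 0f;
        for (float v : vector) {
            squaredMagnitude += v * v; // overflows for extreme components
        }
        return Float.isNaN(squaredMagnitude) == false && Float.isInfinite(squaredMagnitude) == false;
    }

    // isMagnitudeStorableAsFloat(new float[] { -12.1f, 2.7f, -4f })                           -> true
    // isMagnitudeStorableAsFloat(new float[] { 0.07247924f, -4.310546E-11f, -1.7255947E30f }) -> false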
@@ -886,7 +895,9 @@ public Query createKnnQuery( } elementType.checkVectorBounds(queryVector); - if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { + if (similarity == VectorSimilarity.DOT_PRODUCT + || similarity == VectorSimilarity.COSINE + || similarity == VectorSimilarity.MAX_INNER_PRODUCT) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 183c0083c7da1..6d562f88a0100 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -413,6 +413,40 @@ public void testCosineWithZeroByteVector() throws Exception { ); } + public void testMaxInnerProductWithValidNorm() throws Exception { + DocumentMapper mapper = createDocumentMapper( + fieldMapping( + b -> b.field("type", "dense_vector") + .field("dims", 3) + .field("index", true) + .field("similarity", VectorSimilarity.MAX_INNER_PRODUCT) + ) + ); + float[] vector = { -12.1f, 2.7f, -4 }; + // Shouldn't throw + mapper.parse(source(b -> b.array("field", vector))); + } + + public void testWithExtremeFloatVector() throws Exception { + for (VectorSimilarity vs : List.of(VectorSimilarity.COSINE, VectorSimilarity.DOT_PRODUCT, VectorSimilarity.COSINE)) { + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", "dense_vector").field("dims", 3).field("index", true).field("similarity", vs)) + ); + float[] vector = { 0.07247924f, -4.310546E-11f, -1.7255947E30f }; + DocumentParsingException e = expectThrows( + DocumentParsingException.class, + () -> mapper.parse(source(b -> b.array("field", vector))) + ); + assertNotNull(e.getCause()); + assertThat( + e.getCause().getMessage(), + containsString( + "NaN or Infinite magnitude detected, this usually means the vector values are too extreme to fit within a float." + ) + ); + } + } + public void testInvalidParameters() { MapperParsingException e = expectThrows( MapperParsingException.class, From 769c3f319c06ab256e3265cb7d1f778ae67570b8 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Wed, 11 Oct 2023 14:58:57 +0100 Subject: [PATCH 161/176] Don't use an asserting searcher at all in MatchingDirectoryReader (#100668) Follow up to #100527 We are not testing anything to do with searching with this searcher, and so there is no point in using LuceneTestCase.newSearcher() which will wrap it with all sorts of extra checks that may access the underlying reader in ways that are not anticipated by tests. 
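For illustration, the pattern the wrapper relies on looks roughly like this (a sketch against Lucene's public Weight/Scorer API, not the exact EngineTestCase code; matchingDocs is an illustrative name):

    import java.io.IOException;

    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;
    import org.apache.lucene.util.FixedBitSet;

    // Collect the documents in one leaf that match `query`, using a plain
    // IndexSearcher so nothing else touches the underlying reader.
    static FixedBitSet matchingDocs(LeafReader leaf, Query query) throws IOException {
        IndexSearcher searcher = new IndexSearcher(leaf);
        searcher.setQueryCache(null); // no caching side effects on the raw reader
        Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1.0f);
        FixedBitSet bits = new FixedBitSet(leaf.maxDoc());
        Scorer scorer = weight.scorer(leaf.getContext());
        if (scorer != null) {
            DocIdSetIterator it = scorer.iterator();
            for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                bits.set(doc);
            }
        }
        return bits;
    }

Constructing the IndexSearcher directly and nulling the query cache keeps the wrapped LeafReader as the only thing that reads the underlying index.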
Fixes #100460 Fixes #99024 --- .../java/org/elasticsearch/index/engine/EngineTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 17f2303eb84c8..ab9d80b801863 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -1509,7 +1509,7 @@ public MatchingDirectoryReader(DirectoryReader in, Query query) throws IOExcepti @Override public LeafReader wrap(LeafReader leaf) { try { - final IndexSearcher searcher = newSearcher(leaf, false, true, false); + final IndexSearcher searcher = new IndexSearcher(leaf); searcher.setQueryCache(null); final Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1.0f); final Scorer scorer = weight.scorer(leaf.getContext()); From ae8ef6f534e986c40b9fc0b423fe3c7b8337b2f3 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 11 Oct 2023 15:08:31 +0100 Subject: [PATCH 162/176] [ML] Make ELSER settings serialisation compatible with backport (#100626) #100588 introduced a patch transport version, this PR adds the same patch version and updates the serialisation logic --- .../org/elasticsearch/TransportVersions.java | 1 + .../elser/ElserMlNodeServiceSettings.java | 17 +++++-- .../ElserMlNodeServiceSettingsTests.java | 48 +++++++++++++++++++ 3 files changed, 63 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5d51a7959b5fa..6d323c4fc2ea7 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -134,6 +134,7 @@ static TransportVersion def(int id) { public static final TransportVersion NODE_INFO_REQUEST_SIMPLIFIED = def(8_510_00_0); public static final TransportVersion NESTED_KNN_VECTOR_QUERY_V = def(8_511_00_0); public static final TransportVersion ML_PACKAGE_LOADER_PLATFORM_ADDED = def(8_512_00_0); + public static final TransportVersion ELSER_SERVICE_MODEL_VERSION_ADDED_PATCH = def(8_512_00_1); public static final TransportVersion PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME = def(8_513_00_0); public static final TransportVersion UNIVERSAL_PROFILING_LICENSE_ADDED = def(8_514_00_0); public static final TransportVersion ELSER_SERVICE_MODEL_VERSION_ADDED = def(8_515_00_0); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java index 7dffbc693ca51..d1f27302f85f1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettings.java @@ -87,10 +87,21 @@ public ElserMlNodeServiceSettings(int numAllocations, int numThreads, String var public ElserMlNodeServiceSettings(StreamInput in) throws IOException { numAllocations = in.readVInt(); numThreads = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED)) { + if (transportVersionIsCompatibleWithElserModelVersion(in.getTransportVersion())) { modelVariant = 
in.readString(); } else { - modelVariant = ElserMlNodeService.ELSER_V1_MODEL; + modelVariant = ElserMlNodeService.ELSER_V2_MODEL; + } + } + + static boolean transportVersionIsCompatibleWithElserModelVersion(TransportVersion transportVersion) { + var nextNonPatchVersion = TransportVersions.PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME; + + if (transportVersion.onOrAfter(TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED)) { + return true; + } else { + return transportVersion.onOrAfter(TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED_PATCH) + && transportVersion.before(nextNonPatchVersion); } } @@ -130,7 +141,7 @@ public TransportVersion getMinimalSupportedVersion() { public void writeTo(StreamOutput out) throws IOException { out.writeVInt(numAllocations); out.writeVInt(numThreads); - if (out.getTransportVersion().onOrAfter(TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED)) { + if (transportVersionIsCompatibleWithElserModelVersion(out.getTransportVersion())) { out.writeString(modelVariant); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java index 35d5c0b8e9603..8b6f3f1a56ba6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceSettingsTests.java @@ -7,10 +7,12 @@ package org.elasticsearch.xpack.inference.services.elser; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import java.io.IOException; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -85,6 +87,52 @@ public void testFromMapMissingOptions() { assertThat(e.getMessage(), containsString("[service_settings] does not contain the required setting [num_allocations]")); } + public void testTransportVersionIsCompatibleWithElserModelVersion() { + assertTrue( + ElserMlNodeServiceSettings.transportVersionIsCompatibleWithElserModelVersion( + TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED + ) + ); + assertTrue( + ElserMlNodeServiceSettings.transportVersionIsCompatibleWithElserModelVersion( + TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED_PATCH + ) + ); + + assertFalse( + ElserMlNodeServiceSettings.transportVersionIsCompatibleWithElserModelVersion(TransportVersions.ML_PACKAGE_LOADER_PLATFORM_ADDED) + ); + assertFalse( + ElserMlNodeServiceSettings.transportVersionIsCompatibleWithElserModelVersion( + TransportVersions.PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME + ) + ); + assertFalse( + ElserMlNodeServiceSettings.transportVersionIsCompatibleWithElserModelVersion( + TransportVersions.UNIVERSAL_PROFILING_LICENSE_ADDED + ) + ); + } + + public void testBwcWrite() throws IOException { + { + var settings = new ElserMlNodeServiceSettings(1, 1, ".elser_model_1"); + var copy = copyInstance(settings, TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED); + assertEquals(settings, copy); + } + { + var settings = new ElserMlNodeServiceSettings(1, 1, ".elser_model_1"); + var copy = copyInstance(settings, TransportVersions.PLUGIN_DESCRIPTOR_OPTIONAL_CLASSNAME); + assertNotEquals(settings, copy); + assertEquals(".elser_model_2", copy.getModelVariant()); + } + { + var 
settings = new ElserMlNodeServiceSettings(1, 1, ".elser_model_1"); + var copy = copyInstance(settings, TransportVersions.ELSER_SERVICE_MODEL_VERSION_ADDED_PATCH); + assertEquals(settings, copy); + } + } + public void testFromMapInvalidSettings() { var settingsMap = new HashMap( Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 0, ElserMlNodeServiceSettings.NUM_THREADS, -1) From 7a1784c279cb66ea271e8428ad8c257e16ec1024 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Wed, 11 Oct 2023 16:28:20 +0200 Subject: [PATCH 163/176] ESQL: Paginate MV_EXPAND output (#100598) --- .../compute/operator/MvExpandOperator.java | 188 +++++++++++++--- .../operator/MvExpandOperatorStatusTests.java | 20 +- .../operator/MvExpandOperatorTests.java | 213 ++++++++++++++---- .../compute/operator/OperatorTestCase.java | 8 +- .../src/main/resources/mv_expand.csv-spec | 74 ++++++ .../esql/planner/LocalExecutionPlanner.java | 3 +- 6 files changed, 413 insertions(+), 93 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java index f6156507dffa2..c322520d8853b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MvExpandOperator.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.Releasables; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -31,11 +32,12 @@ * 2 | 2 | "foo" * */ -public class MvExpandOperator extends AbstractPageMappingOperator { - public record Factory(int channel) implements OperatorFactory { +public class MvExpandOperator implements Operator { + + public record Factory(int channel, int blockSize) implements OperatorFactory { @Override public Operator get(DriverContext driverContext) { - return new MvExpandOperator(channel); + return new MvExpandOperator(channel, blockSize); } @Override @@ -46,49 +48,158 @@ public String describe() { private final int channel; + private final int pageSize; + private int noops; - public MvExpandOperator(int channel) { + private Page prev; + private boolean prevCompleted = false; + private boolean finished = false; + + private Block expandingBlock; + private Block expandedBlock; + + private int nextPositionToProcess = 0; + private int nextMvToProcess = 0; + private int nextItemOnExpanded = 0; + + /** + * Count of pages that have been processed by this operator. 
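+     * A single input page can expand into several output pages, so {@code pagesIn}
+     * and {@code pagesOut} below are tracked separately and may diverge.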
+ */ + private int pagesIn; + private int pagesOut; + + public MvExpandOperator(int channel, int pageSize) { this.channel = channel; + this.pageSize = pageSize; + assert pageSize > 0; } @Override - protected Page process(Page page) { - Block expandingBlock = page.getBlock(channel); - Block expandedBlock = expandingBlock.expand(); + public final Page getOutput() { + if (prev == null) { + return null; + } + pagesOut++; + if (prev.getPositionCount() == 0 || expandingBlock.mayHaveMultivaluedFields() == false) { + noops++; + Page result = prev; + prev = null; + return result; + } + + try { + return process(); + } finally { + if (prevCompleted && prev != null) { + prev.releaseBlocks(); + prev = null; + } + } + } + + protected Page process() { if (expandedBlock == expandingBlock) { noops++; - return page; + prevCompleted = true; + return prev; } - if (page.getBlockCount() == 1) { + if (prev.getBlockCount() == 1) { assert channel == 0; + prevCompleted = true; return new Page(expandedBlock); } - int[] duplicateFilter = buildDuplicateExpandingFilter(expandingBlock, expandedBlock.getPositionCount()); + int[] duplicateFilter = nextDuplicateExpandingFilter(); - Block[] result = new Block[page.getBlockCount()]; + Block[] result = new Block[prev.getBlockCount()]; + int[] expandedMask = new int[duplicateFilter.length]; + for (int i = 0; i < expandedMask.length; i++) { + expandedMask[i] = i + nextItemOnExpanded; + } + nextItemOnExpanded += expandedMask.length; for (int b = 0; b < result.length; b++) { - result[b] = b == channel ? expandedBlock : page.getBlock(b).filter(duplicateFilter); + result[b] = b == channel ? expandedBlock.filter(expandedMask) : prev.getBlock(b).filter(duplicateFilter); + } + if (nextItemOnExpanded == expandedBlock.getPositionCount()) { + nextItemOnExpanded = 0; } return new Page(result); } - private int[] buildDuplicateExpandingFilter(Block expandingBlock, int newPositions) { - int[] duplicateFilter = new int[newPositions]; + private int[] nextDuplicateExpandingFilter() { + int[] duplicateFilter = new int[Math.min(pageSize, expandedBlock.getPositionCount() - nextPositionToProcess)]; int n = 0; - for (int p = 0; p < expandingBlock.getPositionCount(); p++) { - int count = expandingBlock.getValueCount(p); + while (true) { + int count = expandingBlock.getValueCount(nextPositionToProcess); int positions = count == 0 ? 1 : count; - Arrays.fill(duplicateFilter, n, n + positions, p); - n += positions; + int toAdd = Math.min(pageSize - n, positions - nextMvToProcess); + Arrays.fill(duplicateFilter, n, n + toAdd, nextPositionToProcess); + n += toAdd; + + if (n == pageSize) { + if (nextMvToProcess + toAdd == positions) { + // finished expanding this position, let's move on to next position (that will be expanded with next call) + nextMvToProcess = 0; + nextPositionToProcess++; + if (nextPositionToProcess == expandingBlock.getPositionCount()) { + nextPositionToProcess = 0; + prevCompleted = true; + } + } else { + // there are still items to expand in current position, but the duplicate filter is full, so we'll deal with them at + // next call + nextMvToProcess = nextMvToProcess + toAdd; + } + return duplicateFilter; + } + + nextMvToProcess = 0; + nextPositionToProcess++; + if (nextPositionToProcess == expandingBlock.getPositionCount()) { + nextPositionToProcess = 0; + nextMvToProcess = 0; + prevCompleted = true; + return n < pageSize ? 
Arrays.copyOfRange(duplicateFilter, 0, n) : duplicateFilter; + } } - return duplicateFilter; } @Override - protected AbstractPageMappingOperator.Status status(int pagesProcessed) { - return new Status(pagesProcessed, noops); + public final boolean needsInput() { + return prev == null && finished == false; + } + + @Override + public final void addInput(Page page) { + assert prev == null : "has pending input page"; + prev = page; + this.expandingBlock = prev.getBlock(channel); + this.expandedBlock = expandingBlock.expand(); + pagesIn++; + prevCompleted = false; + } + + @Override + public final void finish() { + finished = true; + } + + @Override + public final boolean isFinished() { + return finished && prev == null; + } + + @Override + public final Status status() { + return new Status(pagesIn, pagesOut, noops); + } + + @Override + public void close() { + if (prev != null) { + Releasables.closeExpectNoException(() -> prev.releaseBlocks()); + } } @Override @@ -96,35 +207,42 @@ public String toString() { return "MvExpandOperator[channel=" + channel + "]"; } - public static final class Status extends AbstractPageMappingOperator.Status { + public static final class Status implements Operator.Status { + + private final int pagesIn; + private final int pagesOut; + private final int noops; + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Operator.Status.class, "mv_expand", Status::new ); - private final int noops; - - Status(int pagesProcessed, int noops) { - super(pagesProcessed); + Status(int pagesIn, int pagesOut, int noops) { + this.pagesIn = pagesIn; + this.pagesOut = pagesOut; this.noops = noops; } Status(StreamInput in) throws IOException { - super(in); + pagesIn = in.readVInt(); + pagesOut = in.readVInt(); noops = in.readVInt(); } @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); + out.writeVInt(pagesIn); + out.writeVInt(pagesOut); out.writeVInt(noops); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("pages_processed", pagesProcessed()); + builder.field("pages_in", pagesIn); + builder.field("pages_out", pagesOut); builder.field("noops", noops); return builder.endObject(); } @@ -147,12 +265,20 @@ public boolean equals(Object o) { return false; } Status status = (Status) o; - return noops == status.noops && pagesProcessed() == status.pagesProcessed(); + return noops == status.noops && pagesIn == status.pagesIn && pagesOut == status.pagesOut; + } + + public int pagesIn() { + return pagesIn; + } + + public int pagesOut() { + return pagesOut; } @Override public int hashCode() { - return Objects.hash(noops, pagesProcessed()); + return Objects.hash(noops, pagesIn, pagesOut); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java index fe281bbf16131..9527388a0d3cf 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorStatusTests.java @@ -16,12 +16,12 @@ public class MvExpandOperatorStatusTests extends AbstractWireSerializingTestCase { public static MvExpandOperator.Status simple() { - return new MvExpandOperator.Status(10, 9); + return new MvExpandOperator.Status(10, 
15, 9); } public static String simpleToJson() { return """ - {"pages_processed":10,"noops":9}"""; + {"pages_in":10,"pages_out":15,"noops":9}"""; } public void testToXContent() { @@ -35,20 +35,28 @@ protected Writeable.Reader instanceReader() { @Override public MvExpandOperator.Status createTestInstance() { - return new MvExpandOperator.Status(randomNonNegativeInt(), randomNonNegativeInt()); + return new MvExpandOperator.Status(randomNonNegativeInt(), randomNonNegativeInt(), randomNonNegativeInt()); } @Override protected MvExpandOperator.Status mutateInstance(MvExpandOperator.Status instance) { - switch (between(0, 1)) { + switch (between(0, 2)) { case 0: return new MvExpandOperator.Status( - randomValueOtherThan(instance.pagesProcessed(), ESTestCase::randomNonNegativeInt), + randomValueOtherThan(instance.pagesIn(), ESTestCase::randomNonNegativeInt), + instance.pagesOut(), instance.noops() ); case 1: return new MvExpandOperator.Status( - instance.pagesProcessed(), + instance.pagesIn(), + randomValueOtherThan(instance.pagesOut(), ESTestCase::randomNonNegativeInt), + instance.noops() + ); + case 2: + return new MvExpandOperator.Status( + instance.pagesIn(), + instance.pagesOut(), randomValueOtherThan(instance.noops(), ESTestCase::randomNonNegativeInt) ); default: diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java index 69c965fc91323..f99685609ff78 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java @@ -9,17 +9,19 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.compute.data.BasicBlockTests; +import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; +import java.util.Iterator; import java.util.List; import static org.elasticsearch.compute.data.BasicBlockTests.randomBlock; import static org.elasticsearch.compute.data.BasicBlockTests.valuesAtPositions; +import static org.elasticsearch.compute.data.BlockTestUtils.deepCopyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -47,7 +49,7 @@ protected Page createPage(int positionOffset, int length) { @Override protected Operator.OperatorFactory simple(BigArrays bigArrays) { - return new MvExpandOperator.Factory(0); + return new MvExpandOperator.Factory(0, randomIntBetween(1, 1000)); } @Override @@ -60,47 +62,143 @@ protected String expectedToStringOfSimple() { return expectedDescriptionOfSimple(); } - @Override - protected void assertSimpleOutput(List input, List results) { - assertThat(results, hasSize(results.size())); - for (int i = 0; i < results.size(); i++) { - IntBlock origExpanded = input.get(i).getBlock(0); - IntBlock resultExpanded = results.get(i).getBlock(0); - int np = 0; - for (int op = 0; op < origExpanded.getPositionCount(); op++) { - if (origExpanded.isNull(op)) { - assertThat(resultExpanded.isNull(np), equalTo(true)); - assertThat(resultExpanded.getValueCount(np++), equalTo(0)); - continue; - } - List oValues = 
BasicBlockTests.valuesAtPositions(origExpanded, op, op + 1).get(0); - for (Object ov : oValues) { - assertThat(resultExpanded.isNull(np), equalTo(false)); - assertThat(resultExpanded.getValueCount(np), equalTo(1)); - assertThat(BasicBlockTests.valuesAtPositions(resultExpanded, np, ++np).get(0), equalTo(List.of(ov))); + class BlockListIterator implements Iterator { + private final Iterator pagesIterator; + private final int channel; + private Block currentBlock; + private int nextPosition; + + BlockListIterator(List pages, int channel) { + this.pagesIterator = pages.iterator(); + this.channel = channel; + this.currentBlock = pagesIterator.next().getBlock(channel); + this.nextPosition = 0; + } + + @Override + public boolean hasNext() { + if (currentBlock == null) { + return false; + } + + return currentBlock.getValueCount(nextPosition) == 0 + || nextPosition < currentBlock.getPositionCount() + || pagesIterator.hasNext(); + } + + @Override + public Object next() { + if (currentBlock != null && currentBlock.getValueCount(nextPosition) == 0) { + nextPosition++; + if (currentBlock.getPositionCount() == nextPosition) { + loadNextBlock(); } + return null; } + List items = valuesAtPositions(currentBlock, nextPosition, nextPosition + 1).get(0); + nextPosition++; + if (currentBlock.getPositionCount() == nextPosition) { + loadNextBlock(); + } + return items.size() == 1 ? items.get(0) : items; + } - IntBlock origDuplicated = input.get(i).getBlock(1); - IntBlock resultDuplicated = results.get(i).getBlock(1); - np = 0; - for (int op = 0; op < origDuplicated.getPositionCount(); op++) { - int copies = origExpanded.isNull(op) ? 1 : origExpanded.getValueCount(op); - for (int c = 0; c < copies; c++) { - if (origDuplicated.isNull(op)) { - assertThat(resultDuplicated.isNull(np), equalTo(true)); - assertThat(resultDuplicated.getValueCount(np++), equalTo(0)); - continue; - } - assertThat(resultDuplicated.isNull(np), equalTo(false)); - assertThat(resultDuplicated.getValueCount(np), equalTo(origDuplicated.getValueCount(op))); - assertThat( - BasicBlockTests.valuesAtPositions(resultDuplicated, np, ++np).get(0), - equalTo(BasicBlockTests.valuesAtPositions(origDuplicated, op, op + 1).get(0)) - ); + private void loadNextBlock() { + if (pagesIterator.hasNext() == false) { + currentBlock = null; + return; + } + this.currentBlock = pagesIterator.next().getBlock(channel); + nextPosition = 0; + } + } + + class BlockListIteratorExpander implements Iterator { + private final Iterator pagesIterator; + private final int channel; + private Block currentBlock; + private int nextPosition; + private int nextInPosition; + + BlockListIteratorExpander(List pages, int channel) { + this.pagesIterator = pages.iterator(); + this.channel = channel; + this.currentBlock = pagesIterator.next().getBlock(channel); + this.nextPosition = 0; + this.nextInPosition = 0; + } + + @Override + public boolean hasNext() { + if (currentBlock == null) { + return false; + } + + return currentBlock.getValueCount(nextPosition) == 0 + || nextInPosition < currentBlock.getValueCount(nextPosition) + || nextPosition < currentBlock.getPositionCount() + || pagesIterator.hasNext(); + } + + @Override + public Object next() { + if (currentBlock != null && currentBlock.getValueCount(nextPosition) == 0) { + nextPosition++; + if (currentBlock.getPositionCount() == nextPosition) { + loadNextBlock(); } + return null; + } + List items = valuesAtPositions(currentBlock, nextPosition, nextPosition + 1).get(0); + Object result = items == null ? 
null : items.get(nextInPosition++); + if (nextInPosition == currentBlock.getValueCount(nextPosition)) { + nextPosition++; + nextInPosition = 0; + } + if (currentBlock.getPositionCount() == nextPosition) { + loadNextBlock(); + } + return result; + } + + private void loadNextBlock() { + if (pagesIterator.hasNext() == false) { + currentBlock = null; + return; + } + this.currentBlock = pagesIterator.next().getBlock(channel); + nextPosition = 0; + nextInPosition = 0; + } + } + + @Override + protected void assertSimpleOutput(List input, List results) { + assertThat(results, hasSize(results.size())); + + var inputIter = new BlockListIteratorExpander(input, 0); + var resultIter = new BlockListIteratorExpander(results, 0); + + while (inputIter.hasNext()) { + assertThat(resultIter.hasNext(), equalTo(true)); + assertThat(resultIter.next(), equalTo(inputIter.next())); + } + assertThat(resultIter.hasNext(), equalTo(false)); + + var originalMvIter = new BlockListIterator(input, 0); + var inputIter2 = new BlockListIterator(input, 1); + var resultIter2 = new BlockListIterator(results, 1); + + while (originalMvIter.hasNext()) { + Object originalMv = originalMvIter.next(); + int originalMvSize = originalMv instanceof List l ? l.size() : 1; + assertThat(resultIter2.hasNext(), equalTo(true)); + Object inputValue = inputIter2.next(); + for (int j = 0; j < originalMvSize; j++) { + assertThat(resultIter2.next(), equalTo(inputValue)); } } + assertThat(resultIter2.hasNext(), equalTo(false)); } @Override @@ -110,7 +208,7 @@ protected ByteSizeValue smallEnoughToCircuitBreak() { } public void testNoopStatus() { - MvExpandOperator op = new MvExpandOperator(0); + MvExpandOperator op = new MvExpandOperator(0, randomIntBetween(1, 1000)); List result = drive( op, List.of(new Page(IntVector.newVectorBuilder(2).appendInt(1).appendInt(2).build().asBlock())).iterator(), @@ -118,26 +216,45 @@ public void testNoopStatus() { ); assertThat(result, hasSize(1)); assertThat(valuesAtPositions(result.get(0).getBlock(0), 0, 2), equalTo(List.of(List.of(1), List.of(2)))); - MvExpandOperator.Status status = (MvExpandOperator.Status) op.status(); - assertThat(status.pagesProcessed(), equalTo(1)); + MvExpandOperator.Status status = op.status(); + assertThat(status.pagesIn(), equalTo(1)); + assertThat(status.pagesOut(), equalTo(1)); assertThat(status.noops(), equalTo(1)); } public void testExpandStatus() { - MvExpandOperator op = new MvExpandOperator(0); + MvExpandOperator op = new MvExpandOperator(0, randomIntBetween(1, 1)); var builder = IntBlock.newBlockBuilder(2).beginPositionEntry().appendInt(1).appendInt(2).endPositionEntry(); List result = drive(op, List.of(new Page(builder.build())).iterator(), driverContext()); assertThat(result, hasSize(1)); assertThat(valuesAtPositions(result.get(0).getBlock(0), 0, 2), equalTo(List.of(List.of(1), List.of(2)))); - MvExpandOperator.Status status = (MvExpandOperator.Status) op.status(); - assertThat(status.pagesProcessed(), equalTo(1)); + MvExpandOperator.Status status = op.status(); + assertThat(status.pagesIn(), equalTo(1)); + assertThat(status.pagesOut(), equalTo(1)); assertThat(status.noops(), equalTo(0)); } - // TODO: remove this once possible - // https://github.com/elastic/elasticsearch/issues/99826 - @Override - protected boolean canLeak() { - return true; + public void testExpandWithBytesRefs() { + DriverContext context = driverContext(); + List input = CannedSourceOperator.collectPages(new AbstractBlockSourceOperator(context.blockFactory(), 8 * 1024) { + private int idx; + + @Override + 
protected int remaining() { + return 10000 - idx; + } + + @Override + protected Page createPage(int positionOffset, int length) { + idx += length; + return new Page( + randomBlock(context.blockFactory(), ElementType.BYTES_REF, length, true, 1, 10, 0, 0).block(), + randomBlock(context.blockFactory(), ElementType.INT, length, false, 1, 10, 0, 0).block() + ); + } + }); + List origInput = deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List results = drive(new MvExpandOperator(0, randomIntBetween(1, 1000)), input.iterator(), context); + assertSimpleOutput(origInput, results); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index 63f601669636c..5d881f03bd07f 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -212,7 +212,7 @@ protected final void assertSimple(DriverContext context, int size) { unreleasedInputs++; } } - if ((canLeak() == false) && unreleasedInputs > 0) { + if (unreleasedInputs > 0) { throw new AssertionError("[" + unreleasedInputs + "] unreleased input blocks"); } } @@ -308,12 +308,6 @@ protected void start(Driver driver, ActionListener driverListener) { } } - // TODO: Remove this once all operators do not leak anymore - // https://github.com/elastic/elasticsearch/issues/99826 - protected boolean canLeak() { - return false; - } - public static void assertDriverContext(DriverContext driverContext) { assertTrue(driverContext.isFinished()); assertThat(driverContext.getSnapshot().releasables(), empty()); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec index 7cc11c6fab5b3..ae27e8f56f9f7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mv_expand.csv-spec @@ -24,3 +24,77 @@ a:integer | b:keyword | j:keyword 3 | b | "a" 3 | b | "b" ; + + +explosion +row +a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +c = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +d = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +e = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +f = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +g = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +x = 10000000000000 +| mv_expand a | mv_expand b | mv_expand c | mv_expand d | mv_expand e | mv_expand f | mv_expand g +| limit 10; + +a:integer | b:integer | c:integer | d:integer | e:integer | f:integer | g:integer | x:long +1 | 1 | 1 | 1 | 1 | 1 | 1 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 2 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 3 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 4 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 5 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 6 | 
10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 7 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 8 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 9 | 10000000000000 +1 | 1 | 1 | 1 | 1 | 1 | 10 | 10000000000000 +; + + +explosionStats +row +a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +b = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +c = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +d = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +e = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], +x = 10000000000000 +| mv_expand a | mv_expand b | mv_expand c | mv_expand d | mv_expand e +| stats sum_a = sum(a) by b +| sort b; + +//12555000 = sum(1..30) * 30 * 30 * 30 +sum_a:long | b:integer +12555000 | 1 +12555000 | 2 +12555000 | 3 +12555000 | 4 +12555000 | 5 +12555000 | 6 +12555000 | 7 +12555000 | 8 +12555000 | 9 +12555000 | 10 +12555000 | 11 +12555000 | 12 +12555000 | 13 +12555000 | 14 +12555000 | 15 +12555000 | 16 +12555000 | 17 +12555000 | 18 +12555000 | 19 +12555000 | 20 +12555000 | 21 +12555000 | 22 +12555000 | 23 +12555000 | 24 +12555000 | 25 +12555000 | 26 +12555000 | 27 +12555000 | 28 +12555000 | 29 +12555000 | 30 +; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index b86072e1b6da0..bdc1c948f2055 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -584,7 +584,8 @@ private PhysicalOperation planLimit(LimitExec limit, LocalExecutionPlannerContex private PhysicalOperation planMvExpand(MvExpandExec mvExpandExec, LocalExecutionPlannerContext context) { PhysicalOperation source = plan(mvExpandExec.child(), context); - return source.with(new MvExpandOperator.Factory(source.layout.get(mvExpandExec.target().id()).channel()), source.layout); + int blockSize = 5000;// TODO estimate row size and use context.pageSize() + return source.with(new MvExpandOperator.Factory(source.layout.get(mvExpandExec.target().id()).channel(), blockSize), source.layout); } /** From 9a8503678bc5841e2e3da1198af9cfce246f21a0 Mon Sep 17 00:00:00 2001 From: Julia Bardi <90178898+juliaElastic@users.noreply.github.com> Date: Wed, 11 Oct 2023 16:54:56 +0200 Subject: [PATCH 164/176] added read and delete privilege (#100684) --- .../authz/store/KibanaOwnedReservedRoleDescriptors.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 579638f474b21..dcd7e106b2e81 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -194,7 +194,10 @@ static RoleDescriptor kibanaSystem(String name) { // Fleet telemetry queries Agent Logs 
indices in kibana task runner RoleDescriptor.IndicesPrivileges.builder().indices("logs-elastic_agent*").privileges("read").build(), // Fleet publishes Agent metrics in kibana task runner - RoleDescriptor.IndicesPrivileges.builder().indices("metrics-fleet_server*").privileges("auto_configure", "write").build(), + RoleDescriptor.IndicesPrivileges.builder() + .indices("metrics-fleet_server*") + .privileges("auto_configure", "read", "write", "delete") + .build(), // Legacy "Alerts as data" used in Security Solution. // Kibana user creates these indices; reads / writes to them. RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.ALERTS_LEGACY_INDEX).privileges("all").build(), From 8a6df32de66f99314de28dabf360c5cf43b2d2a9 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 11 Oct 2023 17:02:27 +0200 Subject: [PATCH 165/176] Update gradle wrapper to 8.4 (#99856) * Remove deprecated forConfigurationTime usage --- .../gradle/wrapper/gradle-wrapper.properties | 4 ++-- .../src/main/resources/minimumGradleVersion | 2 +- gradle/wrapper/gradle-wrapper.properties | 4 ++-- gradlew | 14 +++++++------- .../gradle/wrapper/gradle-wrapper.properties | 4 ++-- x-pack/plugin/eql/qa/correctness/build.gradle | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties index 6c7fa4d4653d2..01f330a93e8fa 100644 --- a/build-tools-internal/gradle/wrapper/gradle-wrapper.properties +++ b/build-tools-internal/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=bb09982fdf52718e4c7b25023d10df6d35a5fff969860bdf5a5bd27a3ab27a9e -distributionUrl=https\://services.gradle.org/distributions/gradle-8.3-all.zip +distributionSha256Sum=f2b9ed0faf8472cbe469255ae6c86eddb77076c75191741b4a462f33128dd419 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/build-tools-internal/src/main/resources/minimumGradleVersion b/build-tools-internal/src/main/resources/minimumGradleVersion index 223a939307878..fad03000495ca 100644 --- a/build-tools-internal/src/main/resources/minimumGradleVersion +++ b/build-tools-internal/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -8.3 \ No newline at end of file +8.4 \ No newline at end of file diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 6c7fa4d4653d2..01f330a93e8fa 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=bb09982fdf52718e4c7b25023d10df6d35a5fff969860bdf5a5bd27a3ab27a9e -distributionUrl=https\://services.gradle.org/distributions/gradle-8.3-all.zip +distributionSha256Sum=f2b9ed0faf8472cbe469255ae6c86eddb77076c75191741b4a462f33128dd419 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/gradlew b/gradlew index 0adc8e1a53214..1aa94a4269074 100755 --- a/gradlew +++ b/gradlew @@ -145,7 +145,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. 
- # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac @@ -153,7 +153,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then '' | soft) :;; #( *) # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -202,11 +202,11 @@ fi # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ diff --git a/plugins/examples/gradle/wrapper/gradle-wrapper.properties b/plugins/examples/gradle/wrapper/gradle-wrapper.properties index 6c7fa4d4653d2..01f330a93e8fa 100644 --- a/plugins/examples/gradle/wrapper/gradle-wrapper.properties +++ b/plugins/examples/gradle/wrapper/gradle-wrapper.properties @@ -1,7 +1,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionSha256Sum=bb09982fdf52718e4c7b25023d10df6d35a5fff969860bdf5a5bd27a3ab27a9e -distributionUrl=https\://services.gradle.org/distributions/gradle-8.3-all.zip +distributionSha256Sum=f2b9ed0faf8472cbe469255ae6c86eddb77076c75191741b4a462f33128dd419 +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip networkTimeout=10000 validateDistributionUrl=true zipStoreBase=GRADLE_USER_HOME diff --git a/x-pack/plugin/eql/qa/correctness/build.gradle b/x-pack/plugin/eql/qa/correctness/build.gradle index 4a72f66c238e3..0008c30f260d6 100644 --- a/x-pack/plugin/eql/qa/correctness/build.gradle +++ b/x-pack/plugin/eql/qa/correctness/build.gradle @@ -14,7 +14,7 @@ dependencies { } File serviceAccountFile = providers.environmentVariable('eql_test_credentials_file') - .orElse(providers.systemProperty('eql.test.credentials.file').forUseAtConfigurationTime()) + .orElse(providers.systemProperty('eql.test.credentials.file')) .map { s -> new File(s)} .getOrNull() From 446ac9f378ade47a31ffc9b645dea894f6ea8731 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Wed, 11 Oct 2023 17:33:20 +0200 Subject: [PATCH 166/176] [DOCS] Updates ELSER tutorial with inference processor changes (#100420) Co-authored-by: Abdon Pijpelink --- .../ingest/processors/inference.asciidoc | 36 ++-- .../semantic-search-elser.asciidoc | 166 +++++++++--------- .../semantic-search/field-mappings.asciidoc | 2 +- .../generate-embeddings.asciidoc | 40 ++--- .../semantic-search/hybrid-search.asciidoc | 2 +- .../semantic-search/search.asciidoc | 6 +- 6 files changed, 130 insertions(+), 122 deletions(-) diff --git a/docs/reference/ingest/processors/inference.asciidoc 
b/docs/reference/ingest/processors/inference.asciidoc index 75b667e634cdb..5f0fedfd7902c 100644 --- a/docs/reference/ingest/processors/inference.asciidoc +++ b/docs/reference/ingest/processors/inference.asciidoc @@ -15,20 +15,27 @@ ingested in the pipeline. .{infer-cap} Options [options="header"] |====== -| Name | Required | Default | Description -| `model_id` . | yes | - | (String) The ID or alias for the trained model, or the ID of the deployment. -| `input_output` | no | - | (List) Input fields for inference and output (destination) fields for the inference results. This options is incompatible with the `target_field` and `field_map` options. -| `target_field` | no | `ml.inference.` | (String) Field added to incoming documents to contain results objects. -| `field_map` | no | If defined the model's default field map | (Object) Maps the document field names to the known field names of the model. This mapping takes precedence over any default mappings provided in the model configuration. +| Name | Required | Default | Description +| `model_id` . | yes | - | (String) The ID or alias for the trained model, or the ID of the deployment. +| `input_output` | no | - | (List) Input fields for {infer} and output (destination) fields for the {infer} results. This option is incompatible with the `target_field` and `field_map` options. +| `target_field` | no | `ml.inference.` | (String) Field added to incoming documents to contain results objects. +| `field_map` | no | If defined the model's default field map | (Object) Maps the document field names to the known field names of the model. This mapping takes precedence over any default mappings provided in the model configuration. | `inference_config` | no | The default settings defined in the model | (Object) Contains the inference type and its options. -| `ignore_missing` | no | `false` | (Boolean) If `true` and any of the input fields defined in `input_ouput` are missing then those missing fields are quietly ignored, otherwise a missing field causes a failure. Only applies when using `input_output` configurations to explicitly list the input fields. +| `ignore_missing` | no | `false` | (Boolean) If `true` and any of the input fields defined in `input_ouput` are missing then those missing fields are quietly ignored, otherwise a missing field causes a failure. Only applies when using `input_output` configurations to explicitly list the input fields. include::common-options.asciidoc[] |====== +IMPORTANT: You cannot use the `input_output` field with the `target_field` and +`field_map` fields. For NLP models, use the `input_output` option. For +{dfanalytics} models, use the `target_field` and `field_map` option. + + [discrete] [[inference-input-output-example]] ==== Configuring input and output fields -Select the `content` field for inference and write the result to `content_embedding`. + +Select the `content` field for inference and write the result to +`content_embedding`. [source,js] -------------------------------------------------- @@ -47,9 +54,11 @@ Select the `content` field for inference and write the result to `content_embedd // NOTCONSOLE ==== Configuring multiple inputs -The `content` and `title` fields will be read from the incoming document -and sent to the model for the inference. The inference output is written -to `content_embedding` and `title_embedding` respectively. + +The `content` and `title` fields will be read from the incoming document and +sent to the model for the inference. 
The inference output is written to +`content_embedding` and `title_embedding` respectively. + [source,js] -------------------------------------------------- { @@ -73,9 +82,9 @@ to `content_embedding` and `title_embedding` respectively. Selecting the input fields with `input_output` is incompatible with the `target_field` and `field_map` options. -Data frame analytics models must use the `target_field` to specify the -root location results are written to and optionally a `field_map` to map -field names in the input document to the model input fields. +{dfanalytics-cap} models must use the `target_field` to specify the root +location results are written to and optionally a `field_map` to map field names +in the input document to the model input fields. [source,js] -------------------------------------------------- { @@ -92,6 +101,7 @@ field names in the input document to the model input fields. -------------------------------------------------- // NOTCONSOLE + [discrete] [[inference-processor-classification-opt]] ==== {classification-cap} configuration options diff --git a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc index 0b4956dbe86ad..2449f26d8dac3 100644 --- a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc @@ -45,8 +45,8 @@ you must provide suitably sized nodes yourself. First, the mapping of the destination index - the index that contains the tokens that the model created based on your text - must be created. The destination index must have a field with the -<> or <> field type to index the -ELSER output. +<> or <> field +type to index the ELSER output. NOTE: ELSER output must be ingested into a field with the `sparse_vector` or `rank_features` field type. Otherwise, {es} interprets the token-weight pairs as @@ -61,10 +61,10 @@ PUT my-index { "mappings": { "properties": { - "ml.tokens": { <1> + "content_embedding": { <1> "type": "sparse_vector" <2> }, - "text": { <3> + "content": { <3> "type": "text" <4> } } @@ -72,10 +72,12 @@ } ---- // TEST[skip:TBD] -<1> The name of the field to contain the generated tokens. +<1> The name of the field to contain the generated tokens. It must be referenced +in the {infer} pipeline configuration in the next step. <2> The field to contain the tokens is a `sparse_vector` field. -<3> The name of the field from which to create the sparse vector representation. -In this example, the name of the field is `text`. +<3> The name of the field from which to create the sparse vector representation. +In this example, the name of the field is `content`. It must be referenced in the +{infer} pipeline configuration in the next step. <4> The field type which is text in this example. To learn how to optimize space, refer to the <> section. @@ -91,32 +93,33 @@ that is being ingested in the pipeline.
[source,console] ---- -PUT _ingest/pipeline/elser-v2-test -{ - "processors": [ - { - "inference": { - "model_id": ".elser_model_2", - "target_field": "ml", - "field_map": { <1> - "text": "text_field" - }, - "inference_config": { - "text_expansion": { <2> - "results_field": "tokens" - } - } - } - } - ] +PUT _ingest/pipeline/elser-v2-test +{ + "processors": [ + { + "inference": { + "model_id": ".elser_model_2", + "input_output": [ <1> + { + "input_field": "content", + "output_field": "content_embedding" + } + ] + } + } + ] } ---- -// TEST[skip:TBD] -<1> The `field_map` object maps the input document field name (which is `text` -in this example) to the name of the field that the model expects (which is -always `text_field`). -<2> The `text_expansion` inference type needs to be used in the {infer} ingest -processor. +<1> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +//// +[source,console] +---- +DELETE _ingest/pipeline/elser-v2-test +---- +// TEST[continued] +//// [discrete] @@ -134,9 +137,9 @@ https://github.com/elastic/stack-docs/blob/main/docs/en/stack/ml/nlp/data/msmarc Download the file and upload it to your cluster using the {kibana-ref}/connect-to-elasticsearch.html#upload-data-kibana[Data Visualizer] -in the {ml-app} UI. Assign the name `id` to the first column and `text` to the -second column. The index name is `test-data`. Once the upload is complete, you -can see an index named `test-data` with 182469 documents. +in the {ml-app} UI. Assign the name `id` to the first column and `content` to +the second column. The index name is `test-data`. Once the upload is complete, +you can see an index named `test-data` with 182469 documents. [discrete] @@ -181,10 +184,10 @@ follow the progress. [[text-expansion-query]] ==== Semantic search by using the `text_expansion` query -To perform semantic search, use the `text_expansion` query, and provide the -query text and the ELSER model ID. The example below uses the query text "How to -avoid muscle soreness after running?", the `ml.tokens` field contains the -generated ELSER output: +To perform semantic search, use the `text_expansion` query, and provide the +query text and the ELSER model ID. The example below uses the query text "How to +avoid muscle soreness after running?", the `content_embedding` field contains +the generated ELSER output: [source,console] ---- @@ -192,7 +195,7 @@ GET my-index/_search { "query":{ "text_expansion":{ - "ml.tokens":{ + "content_embedding":{ "model_id":".elser_model_2", "model_text":"How to avoid muscle soreness after running?" } @@ -209,40 +212,41 @@ weights. [source,consol-result] ---- -"hits":[ - { - "_index":"my-index", - "_id":"978UAYgBKCQMet06sLEy", - "_score":18.612831, - "_ignored":[ - "text.keyword" - ], - "_source":{ - "id":7361587, - "text":"For example, if you go for a run, you will mostly use the muscles in your lower body. Give yourself 2 days to rest those muscles so they have a chance to heal before you exercise them again. Not giving your muscles enough time to rest can cause muscle damage, rather than muscle development.", - "ml":{ - "tokens":{ - "muscular":0.075696334, - "mostly":0.52380747, - "practice":0.23430172, - "rehab":0.3673556, - "cycling":0.13947526, - "your":0.35725075, - "years":0.69484913, - "soon":0.005317828, - "leg":0.41748235, - "fatigue":0.3157955, - "rehabilitation":0.13636169, - "muscles":1.302141, - "exercises":0.36694175, - (...) 
- }, - "model_id":".elser_model_2" - } +"hits": { + "total": { + "value": 10000, + "relation": "gte" + }, + "max_score": 26.199875, + "hits": [ + { + "_index": "my-index", + "_id": "FPr9HYsBag9jXmT8lEpI", + "_score": 26.199875, + "_source": { + "content_embedding": { + "muscular": 0.2821541, + "bleeding": 0.37929374, + "foods": 1.1718726, + "delayed": 1.2112266, + "cure": 0.6848574, + "during": 0.5886185, + "fighting": 0.35022718, + "rid": 0.2752442, + "soon": 0.2967024, + "leg": 0.37649947, + "preparation": 0.32974035, + "advance": 0.09652356, + (...) + }, + "id": 1713868, + "model_id": ".elser_model_2", + "content": "For example, if you go for a run, you will mostly use the muscles in your lower body. Give yourself 2 days to rest those muscles so they have a chance to heal before you exercise them again. Not giving your muscles enough time to rest can cause muscle damage, rather than muscle development." } - }, - (...) -] + }, + (...) + ] +} ---- // NOTCONSOLE @@ -274,8 +278,8 @@ GET my-index/_search "bool": { <1> "should": [ { - "text_expansion": { - "ml.tokens": { + "text_expansion": { + "content_embedding": { "model_text": "How to avoid muscle soreness after running?", "model_id": ".elser_model_2", "boost": 1 <2> @@ -328,8 +332,8 @@ reindexing will not be required in the future! It's important to carefully consider this trade-off and make sure that excluding the ELSER terms from the source aligns with your specific requirements and use case. -The mapping that excludes `ml.tokens` from the `_source` field can be created -by the following API call: +The mapping that excludes `content_embedding` from the `_source` field can be +created by the following API call: [source,console] ---- @@ -338,15 +342,15 @@ PUT my-index "mappings": { "_source": { "excludes": [ - "ml.tokens" + "content_embedding" ] }, "properties": { - "ml.tokens": { - "type": "sparse_vector" + "content_embedding": { + "type": "sparse_vector" }, - "text": { - "type": "text" + "content": { + "type": "text" } } } diff --git a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc index c93ddc3a803a7..2fe2f9cea83f9 100644 --- a/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/field-mappings.asciidoc @@ -17,7 +17,7 @@ PUT my-index { "mappings": { "properties": { - "my_embeddings.tokens": { <1> + "my_tokens": { <1> "type": "sparse_vector" <2> }, "my_text_field": { <3> diff --git a/docs/reference/tab-widgets/semantic-search/generate-embeddings.asciidoc b/docs/reference/tab-widgets/semantic-search/generate-embeddings.asciidoc index 786f40fe141bd..caf6523783b02 100644 --- a/docs/reference/tab-widgets/semantic-search/generate-embeddings.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/generate-embeddings.asciidoc @@ -15,32 +15,26 @@ This is how an ingest pipeline that uses the ELSER model is created: [source,console] ---- -PUT _ingest/pipeline/my-text-embeddings-pipeline -{ +PUT _ingest/pipeline/my-text-embeddings-pipeline +{ "description": "Text embedding pipeline", - "processors": [ - { - "inference": { - "model_id": ".elser_model_2", - "target_field": "my_embeddings", - "field_map": { <1> - "my_text_field": "text_field" - }, - "inference_config": { - "text_expansion": { <2> - "results_field": "tokens" - } - } - } - } - ] + "processors": [ + { + "inference": { + "model_id": ".elser_model_2", + "input_output": [ <1> + { + "input_field": "my_text_field", + "output_field": 
"my_tokens" + } + ] + } + } + ] } ---- -<1> The `field_map` object maps the input document field name (which is -`my_text_field` in this example) to the name of the field that the model expects -(which is always `text_field`). -<2> The `text_expansion` inference type needs to be used in the inference ingest -processor. +<1> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. To ingest data through the pipeline to generate tokens with ELSER, refer to the <> section of the tutorial. After you successfully diff --git a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc index a99bdf3c8722b..f7d9ee1ad6443 100644 --- a/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/hybrid-search.asciidoc @@ -21,7 +21,7 @@ GET my-index/_search { "query": { "text_expansion": { - "my_embeddings.tokens": { + "my_tokens": { "model_id": ".elser_model_2", "model_text": "the query string" } diff --git a/docs/reference/tab-widgets/semantic-search/search.asciidoc b/docs/reference/tab-widgets/semantic-search/search.asciidoc index d1cd31fbe4309..315328add07f0 100644 --- a/docs/reference/tab-widgets/semantic-search/search.asciidoc +++ b/docs/reference/tab-widgets/semantic-search/search.asciidoc @@ -2,8 +2,8 @@ ELSER text embeddings can be queried using a <>. The text expansion -query enables you to query a rank features field, by providing the model ID of -the NLP model, and the query text: +query enables you to query a rank features field or a sparse vector field, by +providing the model ID of the NLP model, and the query text: [source,console] ---- @@ -11,7 +11,7 @@ GET my-index/_search { "query":{ "text_expansion":{ - "my_embeddings.tokens":{ <1> + "my_tokens":{ <1> "model_id":".elser_model_2", "model_text":"the query string" } From 72f29c9e079c02b8d8097bda1bd0fce03aa1286f Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 11 Oct 2023 08:50:57 -0700 Subject: [PATCH 167/176] Fix node version settings for SearchShardsResponseTests (#100691) similar to: https://github.com/elastic/elasticsearch/pull/100557 --- .../search/SearchShardsResponseTests.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchShardsResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchShardsResponseTests.java index 450cbdba85473..6d9f3dfaefcf0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchShardsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchShardsResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -23,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.query.RandomQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.shard.ShardId; @@ -30,7 +32,6 @@ import 
org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -39,6 +40,7 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -109,16 +111,14 @@ protected SearchShardsResponse mutateInstance(SearchShardsResponse r) throws IOE } public void testLegacyResponse() { - DiscoveryNode node1 = DiscoveryNodeUtils.create( - "node-1", - new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF)), - VersionUtils.randomVersion(random()) - ); - DiscoveryNode node2 = DiscoveryNodeUtils.create( - "node-2", - new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF)), - VersionUtils.randomVersion(random()) - ); + DiscoveryNode node1 = DiscoveryNodeUtils.builder("node-1") + .address(new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF))) + .version(randomCompatibleVersion(random(), Version.CURRENT), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); + DiscoveryNode node2 = DiscoveryNodeUtils.builder("node-2") + .address(new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF))) + .version(randomCompatibleVersion(random(), Version.CURRENT), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()) + .build(); final ClusterSearchShardsGroup[] groups = new ClusterSearchShardsGroup[2]; { ShardId shardId = new ShardId("index-1", "uuid-1", 0); From 6a902684c5d2e3d768f98078521e552a91db9ce7 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 11 Oct 2023 08:51:25 -0700 Subject: [PATCH 168/176] Exclude BWC tests in platform support testing matrix (#100643) --- .buildkite/pipelines/periodic-platform-support.yml | 4 +--- build.gradle | 6 ++++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index 08c9fda4c9a6b..206ea7efb2042 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -2,7 +2,7 @@ steps: - group: platform-support-unix steps: - label: "{{matrix.image}} / platform-support-unix" - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true check + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true platformSupportTests timeout_in_minutes: 420 matrix: setup: @@ -46,7 +46,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: gcp @@ -70,7 +69,6 @@ steps: - checkPart1 - checkPart2 - checkPart3 - - bwcTestSnapshots - checkRestCompat agents: provider: aws diff --git a/build.gradle b/build.gradle index c33489c46b53c..4a5cf6cb03cfa 100644 --- a/build.gradle +++ b/build.gradle @@ -186,8 +186,8 @@ if (bwc_tests_enabled == false) { println "See ${bwc_tests_disabled_issue}" println "===========================================================" } -if (project.gradle.startParameter.taskNames.find { it.startsWith("checkPart") } != null) { - // Disable BWC tests for checkPart* tasks as it's expected that this will run un it's own check +if (project.gradle.startParameter.taskNames.any { it.startsWith("checkPart") || it == 'platformSupportTests' }) { + // Disable BWC tests for checkPart* tasks and platform support tests as it's expected that this 
will run on its own check bwc_tests_enabled = false } @@ -255,6 +255,8 @@ allprojects { } else { tasks.register('checkPart1') { dependsOn 'check' } } + + tasks.register('platformSupportTests') { dependsOn 'check'} } /* From add3fdc50f8819795532b3313d7870e484c86fec Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 11 Oct 2023 11:03:29 -0500 Subject: [PATCH 169/176] Allowing a warning in a 230_change_target_index yaml rest test (#100686) This adds an allowed warning when an index template is created in `Test Change Target Index with Default Pipeline`. This appears to happen more frequently now that we're running in the new test framework (https://github.com/elastic/elasticsearch/pull/100537). The index template creation was also just recently added in https://github.com/elastic/elasticsearch/pull/100540. Closes #100584 --- .../rest-api-spec/test/ingest/230_change_target_index.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml index 0114484e723a5..de0abb0ebedd9 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/230_change_target_index.yml @@ -102,6 +102,8 @@ teardown: "Test Change Target Index with Default Pipeline": - do: + allowed_warnings: + - "index template [index_template] has index patterns [test] matching patterns from existing older templates [global] with patterns (global => [*]); this template [index_template] will take precedence during new index creation" indices.put_index_template: name: index_template body: From bd4d45a5739bf662d4e09f7134c3ba97c804bfe7 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 11 Oct 2023 11:11:17 -0500 Subject: [PATCH 170/176] Avoiding deleting SLM history indices in rest test cases because they are written to asynchronously (#100694) If an SLM policy happens to run during a rest test, an SLM history index will be created, which then causes a failure during test cleanup like: ``` org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT > test {yaml=reference/query-dsl/simple-query-string-query/line_291} FAILED org.elasticsearch.client.ResponseException: method [DELETE], host [http://127.0.0.1:43027], URI [*,-.ds-ilm-history-*?expand_wildcards=open%2Cclosed%2Chidden], status line [HTTP/1.1 400 Bad Request] {"error":{"root_cause":[{"type":"illegal_argument_exception","reason":"index [.ds-.slm-history-5-2023.10.09-000001] is the write index for data stream [.slm-history-5] and cannot be deleted"}],"type":"illegal_argument_exception","reason":"index [.ds-.slm-history-5-2023.10.09-000001] is the write index for data stream [.slm-history-5] and cannot be deleted"},"status":400} at __randomizedtesting.SeedInfo.seed([E12124F261F0BF90:69751B28CF0CD268]:0) at app//org.elasticsearch.client.RestClient.convertResponse(RestClient.java:347) at app//org.elasticsearch.client.RestClient.performRequest(RestClient.java:313) at app//org.elasticsearch.client.RestClient.performRequest(RestClient.java:288) at app//org.elasticsearch.test.rest.ESRestTestCase.wipeAllIndices(ESRestTestCase.java:981) at app//org.elasticsearch.test.rest.ESRestTestCase.wipeCluster(ESRestTestCase.java:729) at app//org.elasticsearch.test.rest.ESRestTestCase.cleanUpCluster(ESRestTestCase.java:421) at
java.base@21/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:103) at java.base@21/java.lang.reflect.Method.invoke(Method.java:580) at app//com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1758) ``` This PR excludes the SLM history index from deletion the same way we exclude the ILM history index. Closes #100536 --- .../main/java/org/elasticsearch/test/rest/ESRestTestCase.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index a79d952ac5b27..444a0c6d752dc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -971,8 +971,8 @@ protected static void wipeAllIndices() throws IOException { protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOException { boolean includeHidden = minimumNodeVersion().onOrAfter(Version.V_7_7_0); try { - // remove all indices except ilm history which can pop up after deleting all data streams but shouldn't interfere - final List<String> indexPatterns = new ArrayList<>(List.of("*", "-.ds-ilm-history-*")); + // remove all indices except ilm and slm history which can pop up after deleting all data streams but shouldn't interfere + final List<String> indexPatterns = new ArrayList<>(List.of("*", "-.ds-ilm-history-*", "-.ds-.slm-history-*")); if (preserveSecurityIndices) { indexPatterns.add("-.security-*"); } From 4bcde0d26173c707c35fafe1e19865c33b2ed489 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 11 Oct 2023 09:38:39 -0700 Subject: [PATCH 171/176] Allow releasing a vector without releasing its block (#100688) Today, a vector must be closed via its block. This PR removes this restriction as there are cases where we only interact with the vector.
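A minimal sketch of the usage this enables, mirroring the new `BlockFactoryTests#testReleaseVector` case added below (`blockFactory` is assumed to be a `BlockFactory` wired to a circuit breaker, as in that test):

```java
// A vector can now be released directly instead of through its block.
IntVector vector = blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3);
IntBlock block = vector.asBlock();

vector.close();              // legal after this change
assert vector.isReleased();  // new Vector#isReleased() accessor
assert block.isReleased();   // the block view observes the vector's release

// Releasing either wrapper a second time now fails fast with
// IllegalStateException("can't release already released vector [...]").
```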
--- .../compute/data/BooleanBigArrayVector.java | 4 ++++ .../compute/data/BooleanVectorBlock.java | 7 ++++++- .../compute/data/BytesRefArrayVector.java | 4 ++++ .../compute/data/BytesRefVectorBlock.java | 7 ++++++- .../compute/data/ConstantBooleanVector.java | 4 ++++ .../compute/data/ConstantBytesRefVector.java | 4 ++++ .../compute/data/ConstantDoubleVector.java | 4 ++++ .../compute/data/ConstantIntVector.java | 4 ++++ .../compute/data/ConstantLongVector.java | 4 ++++ .../compute/data/DoubleBigArrayVector.java | 4 ++++ .../compute/data/DoubleVectorBlock.java | 7 ++++++- .../compute/data/IntBigArrayVector.java | 4 ++++ .../elasticsearch/compute/data/IntVectorBlock.java | 7 ++++++- .../compute/data/LongBigArrayVector.java | 4 ++++ .../compute/data/LongVectorBlock.java | 7 ++++++- .../elasticsearch/compute/data/AbstractVector.java | 9 +++++++++ .../org/elasticsearch/compute/data/Vector.java | 5 +++++ .../compute/data/X-ArrayVector.java.st | 4 ++++ .../compute/data/X-BigArrayVector.java.st | 4 ++++ .../compute/data/X-ConstantVector.java.st | 4 ++++ .../compute/data/X-VectorBlock.java.st | 7 ++++++- .../compute/data/BlockFactoryTests.java | 14 ++++++++++++++ 22 files changed, 116 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java index 13dd594a548ec..5ad88ab1ac6e9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java @@ -71,6 +71,10 @@ public BooleanVector filter(int... 
positions) { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 383543f1451ff..1a7a5b4aa6e7e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -70,9 +70,14 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } + @Override + public boolean isReleased() { + return released || vector.isReleased(); + } + @Override public void close() { - if (released) { + if (released || vector.isReleased()) { throw new IllegalStateException("can't release already released block [" + this + "]"); } released = true; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index cabe8b86be2bf..02ab9a09b15e1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -94,6 +94,10 @@ public String toString() { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); Releasables.closeExpectNoException(values); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 38a2243e1f532..5b0f2f2331fbe 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -71,9 +71,14 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } + @Override + public boolean isReleased() { + return released || vector.isReleased(); + } + @Override public void close() { - if (released) { + if (released || vector.isReleased()) { throw new IllegalStateException("can't release already released block [" + this + "]"); } released = true; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java index 2671b0d87b573..e4f6e6f144abe 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java @@ -80,6 +80,10 @@ public String toString() { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; 
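+        // the released flag is flipped before the breaker adjustment below, so a repeated close() throws instead of crediting the breaker twice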
blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java index 6fc64a6891c32..f5f6e7945d03b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java @@ -85,6 +85,10 @@ public String toString() { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java index 9d6650ebe41a8..05e71f3853155 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java @@ -80,6 +80,10 @@ public String toString() { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java index aeb178d70690b..3f1eb45843c66 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java @@ -80,6 +80,10 @@ public String toString() { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java index ec6bf84a84e32..08840c3772a9e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java @@ -80,6 +80,10 @@ public String toString() { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java index 20f5a65d8d34c..d50e8adbbd37d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java @@ -69,6 +69,10 @@ public DoubleVector filter(int... positions) { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index 2c25313e97f29..d05be62744bc8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -70,9 +70,14 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } + @Override + public boolean isReleased() { + return released || vector.isReleased(); + } + @Override public void close() { - if (released) { + if (released || vector.isReleased()) { throw new IllegalStateException("can't release already released block [" + this + "]"); } released = true; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java index 718b07274d366..3bb9461300ee4 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java @@ -69,6 +69,10 @@ public IntVector filter(int... positions) { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index e2fc6b3313d6a..472475d0662d7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -70,9 +70,14 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } + @Override + public boolean isReleased() { + return released || vector.isReleased(); + } + @Override public void close() { - if (released) { + if (released || vector.isReleased()) { throw new IllegalStateException("can't release already released block [" + this + "]"); } released = true; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java index 99179cb6d87e3..ccf4a6944b60e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java @@ -69,6 +69,10 @@ public LongVector filter(int... 
positions) { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index f8c0d6d1df417..b94cd4e875dc3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -70,9 +70,14 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } + @Override + public boolean isReleased() { + return released || vector.isReleased(); + } + @Override public void close() { - if (released) { + if (released || vector.isReleased()) { throw new IllegalStateException("can't release already released block [" + this + "]"); } released = true; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java index a0335bb5c24e7..d7bda952bdcd0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java @@ -14,6 +14,7 @@ abstract class AbstractVector implements Vector { private final int positionCount; protected final BlockFactory blockFactory; + protected boolean released; protected AbstractVector(int positionCount, BlockFactory blockFactory) { this.positionCount = positionCount; @@ -36,7 +37,15 @@ public BlockFactory blockFactory() { @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed(), true); } + @Override + public final boolean isReleased() { + return released; + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java index c9ecf1aa9e399..e2cea86a5a38f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java @@ -60,4 +60,9 @@ interface Builder extends Releasable { */ Vector build(); } + + /** + * Whether this vector was released + */ + boolean isReleased(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index 07ec2230deee1..02a876142fb0d 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -125,6 +125,10 @@ $endif$ $if(BytesRef)$ @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); Releasables.closeExpectNoException(values); } diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st index e448d917a65ce..5bf629cec61d3 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st @@ -78,6 +78,10 @@ public final class $Type$BigArrayVector extends AbstractVector implements $Type$ @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st index 36384f3996f55..b80188cefba2e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st @@ -104,6 +104,10 @@ $endif$ @Override public void close() { + if (released) { + throw new IllegalStateException("can't release already released vector [" + this + "]"); + } + released = true; blockFactory.adjustBreaker(-ramBytesUsed(), true); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 57bb09c87e39e..3ef4251f80684 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -78,9 +78,14 @@ $endif$ return getClass().getSimpleName() + "[vector=" + vector + "]"; } + @Override + public boolean isReleased() { + return released || vector.isReleased(); + } + @Override public void close() { - if (released) { + if (released || vector.isReleased()) { throw new IllegalStateException("can't release already released block [" + this + "]"); } released = true; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java index 9c6c9d966b3f6..88a584ac5ee44 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java @@ -30,6 +30,7 @@ import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; @@ -554,6 +555,19 @@ public void testBytesRefVectorBuilderWithPossiblyLargeEstimateRandom() { } } + public void testReleaseVector() { + int positionCount = randomIntBetween(1, 10); + IntVector vector = blockFactory.newIntArrayVector(new int[positionCount], positionCount); + if (randomBoolean()) { + vector.asBlock().close(); + } else { + vector.close(); + } + assertTrue(vector.isReleased()); + assertTrue(vector.asBlock().isReleased()); + assertThat(breaker.getUsed(), equalTo(0L)); + } + static BytesRef randomBytesRef() { return new 
BytesRef(randomByteArrayOfLength(between(1, 20))); } From 731c253058099dfbbe4f3dcf0d1b8a286e33ac27 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 11 Oct 2023 19:33:23 +0200 Subject: [PATCH 172/176] Update spotless plugin (#100666) - Support Java 21 - fix configuration cache incompatibility --- gradle/build.versions.toml | 2 +- gradle/verification-metadata.xml | 75 +++++++++++++++++++++++++++++--- 2 files changed, 71 insertions(+), 6 deletions(-) diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index 4de1dc680064a..4472fd635f905 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -40,6 +40,6 @@ shadow-plugin = "com.github.johnrengelman:shadow:8.1.1" spock-core = { group = "org.spockframework", name="spock-core", version.ref="spock" } spock-junit4 = { group = "org.spockframework", name="spock-junit4", version.ref="spock" } spock-platform = { group = "org.spockframework", name="spock-bom", version.ref="spock" } -spotless-plugin = "com.diffplug.spotless:spotless-plugin-gradle:6.18.0" +spotless-plugin = "com.diffplug.spotless:spotless-plugin-gradle:6.22.0" wiremock = "com.github.tomakehurst:wiremock-jre8-standalone:2.23.2" xmlunit-core = "org.xmlunit:xmlunit-core:2.8.2" diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 8fcb2fecad971..3200cd14ae3a8 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml [13 hunks adding and removing <sha256> checksum entries for the updated spotless artifacts; the XML elements did not survive conversion to plain text and are elided here] From 81a441e636fd4e54b77e512da4165d9b200ccafb Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 11 Oct 2023 14:11:27 -0400 Subject: [PATCH 173/176] [buildkite] Add more logging and debug information to PR pipeline generation (#100709) --- .../__snapshots__/pipeline.test.ts.snap | 300 ++++++++++-------- .../scripts/pull-request/pipeline.generate.ts | 16 +- .buildkite/scripts/pull-request/pipeline.sh | 3 + .buildkite/scripts/pull-request/pipeline.ts | 12 +- 4 files changed, 182 insertions(+), 149 deletions(-) diff --git a/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap b/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap index 39cd3fe07beb4..6df8ca8b63438 100644 --- a/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap +++ b/.buildkite/scripts/pull-request/__snapshots__/pipeline.test.ts.snap @@ -3,47 +3,53 @@ exports[`generatePipelines should generate correct pipelines with a non-docs change 1`] = ` [ { - "steps": [ - { - "group": "bwc-snapshots", - "steps": [ - { - "agents": { - "buildDirectory": "/dev/shm/bk", - "image":
"family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dignore.tests.seed v{{matrix.BWC_VERSION}}#bwcTest", + "env": { + "BWC_VERSION": "{{matrix.BWC_VERSION}}", }, + "label": "{{matrix.BWC_VERSION}} / bwc-snapshots", + "matrix": { + "setup": { + "BWC_VERSION": [ + "7.17.14", + "8.10.3", + "8.11.0", + ], + }, + }, + "timeout_in_minutes": 300, }, - "timeout_in_minutes": 300, - }, - ], - }, - ], + ], + }, + ], + }, }, { - "env": { - "CUSTOM_ENV_VAR": "value", - }, - "steps": [ - { - "command": "echo 'hello world'", - "label": "test-step", + "name": "using-defaults", + "pipeline": { + "env": { + "CUSTOM_ENV_VAR": "value", }, - ], + "steps": [ + { + "command": "echo 'hello world'", + "label": "test-step", + }, + ], + }, }, ] `; @@ -51,19 +57,22 @@ exports[`generatePipelines should generate correct pipelines with a non-docs cha exports[`generatePipelines should generate correct pipelines with only docs changes 1`] = ` [ { - "steps": [ - { - "agents": { - "buildDirectory": "/dev/shm/bk", - "image": "family/elasticsearch-ubuntu-2004", - "machineType": "custom-32-98304", - "provider": "gcp", + "name": "docs-check", + "pipeline": { + "steps": [ + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dignore.tests.seed precommit :docs:check", + "label": "docs-check", + "timeout_in_minutes": 300, }, - "command": ".ci/scripts/run-gradle.sh -Dignore.tests.seed precommit :docs:check", - "label": "docs-check", - "timeout_in_minutes": 300, - }, - ], + ], + }, }, ] `; @@ -71,99 +80,105 @@ exports[`generatePipelines should generate correct pipelines with only docs chan exports[`generatePipelines should generate correct pipelines with full BWC expansion 1`] = ` [ { - "steps": [ - { - "group": "bwc", - "steps": [ - { - "agents": { - "buildDirectory": "/dev/shm/bk", - "image": "family/elasticsearch-ubuntu-2004", - "machineType": "custom-32-98304", - "provider": "gcp", - }, - "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.0#bwcTest", - "env": { - "BWC_VERSION": "7.0.0", - }, - "key": "full-bwc:7_0_0", - "label": "7.0.0 / bwc", - "timeout_in_minutes": 300, - }, - { - "agents": { - "buildDirectory": "/dev/shm/bk", - "image": "family/elasticsearch-ubuntu-2004", - "machineType": "custom-32-98304", - "provider": "gcp", - }, - "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.1#bwcTest", - "env": { - "BWC_VERSION": "7.0.1", - }, - "key": "full-bwc:7_0_1", - "label": "7.0.1 / bwc", - "timeout_in_minutes": 300, - }, - { - "agents": { - "buildDirectory": "/dev/shm/bk", - "image": "family/elasticsearch-ubuntu-2004", - "machineType": "custom-32-98304", - "provider": "gcp", - }, - "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.0#bwcTest", - "env": { - "BWC_VERSION": "7.1.0", + "name": "full-bwc", + "pipeline": { + "steps": [ + { + "group": "bwc", + "steps": [ + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.0#bwcTest", + "env": { + "BWC_VERSION": "7.0.0", + }, + "key": "full-bwc:7_0_0", + "label": "7.0.0 / bwc", + "timeout_in_minutes": 300, }, - "key": "full-bwc:7_1_0", - "label": "7.1.0 / bwc", - "timeout_in_minutes": 300, - }, - { - "agents": { - 
"buildDirectory": "/dev/shm/bk", - "image": "family/elasticsearch-ubuntu-2004", - "machineType": "custom-32-98304", - "provider": "gcp", + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.1#bwcTest", + "env": { + "BWC_VERSION": "7.0.1", + }, + "key": "full-bwc:7_0_1", + "label": "7.0.1 / bwc", + "timeout_in_minutes": 300, }, - "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.0#bwcTest", - "env": { - "BWC_VERSION": "8.10.0", + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.0#bwcTest", + "env": { + "BWC_VERSION": "7.1.0", + }, + "key": "full-bwc:7_1_0", + "label": "7.1.0 / bwc", + "timeout_in_minutes": 300, }, - "key": "full-bwc:8_10_0", - "label": "8.10.0 / bwc", - "timeout_in_minutes": 300, - }, - { - "agents": { - "buildDirectory": "/dev/shm/bk", - "image": "family/elasticsearch-ubuntu-2004", - "machineType": "custom-32-98304", - "provider": "gcp", + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.0#bwcTest", + "env": { + "BWC_VERSION": "8.10.0", + }, + "key": "full-bwc:8_10_0", + "label": "8.10.0 / bwc", + "timeout_in_minutes": 300, }, - "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest", - "env": { - "BWC_VERSION": "8.11.0", + { + "agents": { + "buildDirectory": "/dev/shm/bk", + "image": "family/elasticsearch-ubuntu-2004", + "machineType": "custom-32-98304", + "provider": "gcp", + }, + "command": ".ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest", + "env": { + "BWC_VERSION": "8.11.0", + }, + "key": "full-bwc:8_11_0", + "label": "8.11.0 / bwc", + "timeout_in_minutes": 300, }, - "key": "full-bwc:8_11_0", - "label": "8.11.0 / bwc", - "timeout_in_minutes": 300, - }, - ], - }, - ], + ], + }, + ], + }, }, { - "env": { - "CUSTOM_ENV_VAR": "value", - }, - "steps": [ - { - "command": "echo 'hello world'", - "label": "test-step", + "name": "using-defaults", + "pipeline": { + "env": { + "CUSTOM_ENV_VAR": "value", }, - ], + "steps": [ + { + "command": "echo 'hello world'", + "label": "test-step", + }, + ], + }, }, ] `; @@ -171,15 +186,18 @@ exports[`generatePipelines should generate correct pipelines with full BWC expan exports[`generatePipelines should generate correct pipeline when using a trigger comment for it 1`] = ` [ { - "env": { - "CUSTOM_ENV_VAR": "value", - }, - "steps": [ - { - "command": "echo 'hello world'", - "label": "test-step", + "name": "using-defaults", + "pipeline": { + "env": { + "CUSTOM_ENV_VAR": "value", }, - ], + "steps": [ + { + "command": "echo 'hello world'", + "label": "test-step", + }, + ], + }, }, ] `; diff --git a/.buildkite/scripts/pull-request/pipeline.generate.ts b/.buildkite/scripts/pull-request/pipeline.generate.ts index 69caff990dcfe..aec50dcdaece0 100644 --- a/.buildkite/scripts/pull-request/pipeline.generate.ts +++ b/.buildkite/scripts/pull-request/pipeline.generate.ts @@ -6,13 +6,19 @@ import { generatePipelines } from "./pipeline"; const pipelines = generatePipelines(); for (const pipeline of pipelines) { - if (!process.env.CI) { - // Just for local 
debugging purposes + const yaml = stringify(pipeline.pipeline); + + console.log(`--- Generated pipeline: ${pipeline.name}`); + console.log(yaml); + + // Only do the pipeline upload if we're actually in CI + // This lets us run the tool locally and see the output + if (process.env.CI) { console.log(""); - console.log(stringify(pipeline)); - } else { + console.log("Uploading pipeline..."); + execSync(`buildkite-agent pipeline upload`, { - input: stringify(pipeline), + input: yaml, stdio: ["pipe", "inherit", "inherit"], }); } diff --git a/.buildkite/scripts/pull-request/pipeline.sh b/.buildkite/scripts/pull-request/pipeline.sh index 77bbc1e115430..3012308fbcace 100755 --- a/.buildkite/scripts/pull-request/pipeline.sh +++ b/.buildkite/scripts/pull-request/pipeline.sh @@ -2,5 +2,8 @@ set -euo pipefail +echo --- Installing bun npm install -g bun + +echo --- Generating pipeline bun .buildkite/scripts/pull-request/pipeline.generate.ts diff --git a/.buildkite/scripts/pull-request/pipeline.ts b/.buildkite/scripts/pull-request/pipeline.ts index a6a3df6ff2aff..600e0373d9cfc 100644 --- a/.buildkite/scripts/pull-request/pipeline.ts +++ b/.buildkite/scripts/pull-request/pipeline.ts @@ -116,6 +116,7 @@ export const generatePipelines = ( .filter((x) => x); if (!changedFiles?.length) { + console.log("Doing git fetch and getting merge-base"); const mergeBase = execSync( `git fetch origin ${process.env["GITHUB_PR_TARGET_BRANCH"]}; git merge-base origin/${process.env["GITHUB_PR_TARGET_BRANCH"]} HEAD`, { cwd: PROJECT_ROOT } @@ -123,12 +124,17 @@ export const generatePipelines = ( .toString() .trim(); + console.log(`Merge base: ${mergeBase}`); + const changedFilesOutput = execSync(`git diff --name-only ${mergeBase}`, { cwd: PROJECT_ROOT }).toString().trim(); changedFiles = changedFilesOutput .split("\n") .map((x) => x.trim()) .filter((x) => x); + + console.log("Changed files (first 50):"); + console.log(changedFiles.slice(0, 50).join("\n")); } let filters: ((pipeline: EsPipeline) => boolean)[] = [ @@ -154,9 +160,9 @@ export const generatePipelines = ( pipelines.sort((a, b) => (a.name ?? "").localeCompare(b.name ?? 
"")); const finalPipelines = pipelines.map((pipeline) => { - const finalPipeline = { ...pipeline }; - delete finalPipeline.config; - delete finalPipeline.name; + const finalPipeline = { name: pipeline.name, pipeline: { ...pipeline } }; + delete finalPipeline.pipeline.config; + delete finalPipeline.pipeline.name; return finalPipeline; }); From 1c1f0fd24d8f61975e87512d9bc5b28bb0588d2f Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Wed, 11 Oct 2023 13:18:34 -0500 Subject: [PATCH 174/176] Adding replica shards for ingest-common yaml rest tests (#100638) --- .../IngestCommonClientYamlTestSuiteIT.java | 19 ++++++++++++++----- .../test/ingest/190_script_processor.yml | 4 ++-- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java b/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java index 889fa0b101c06..598d171bb328b 100644 --- a/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java +++ b/modules/ingest-common/src/yamlRestTest/java/org/elasticsearch/ingest/common/IngestCommonClientYamlTestSuiteIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterSpecBuilder; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; @@ -39,11 +40,19 @@ protected Settings restClientSettings() { } @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .distribution(DistributionType.DEFAULT) - .setting("xpack.security.enabled", "true") - .user("x_pack_rest_user", "x-pack-test-password") - .build(); + public static ElasticsearchCluster cluster = createCluster(); + + private static ElasticsearchCluster createCluster() { + LocalClusterSpecBuilder clusterBuilder = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "true") + .user("x_pack_rest_user", "x-pack-test-password"); + boolean setNodes = Boolean.parseBoolean(System.getProperty("yaml.rest.tests.set_num_nodes", "true")); + if (setNodes) { + clusterBuilder.nodes(2); + } + return clusterBuilder.build(); + } @Override protected String getTestRestCluster() { diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml index d833e0111f83b..53928894ceec7 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml @@ -414,7 +414,7 @@ teardown: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: append: @@ -513,7 +513,7 @@ teardown: body: settings: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 default_pipeline: nested_documents mappings: properties: From b95149cd874d43d269319950f525da770e953892 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 11 Oct 2023 11:28:34 -0700 Subject: [PATCH 175/176] Allow 
enrich_user to read/view enrich indices (#100707) > Unexpected error from Elasticsearch: security_exception - action [indices:data/read/esql/lookup] is unauthorized for user [guest] with effective roles [enrich_user,esql-read-role] on restricted indices [.enrich-group_lookup-1696927917972], this action is granted by the index privileges [read,all] Currently, the enrich indices (.enrich-*) are restricted system indices managed by the enrich plugin. While the `enrich_user` should not be allowed to manage or write to these indices, it should be allowed to read and view_index_metadata. This is necessary for ESQL; otherwise, ESQL users would require broader privileges to perform enrich in ESQL. --- docs/changelog/100707.yaml | 5 +++++ .../authz/store/ReservedRolesStore.java | 7 ++++++- .../authz/store/ReservedRolesStoreTests.java | 18 ++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/100707.yaml diff --git a/docs/changelog/100707.yaml b/docs/changelog/100707.yaml new file mode 100644 index 0000000000000..6808b781b603a --- /dev/null +++ b/docs/changelog/100707.yaml @@ -0,0 +1,5 @@ +pr: 100707 +summary: Allow `enrich_user` to read/view enrich indices +area: Authorization +type: bug +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 4cb88b22b5599..243e151370516 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -607,7 +607,12 @@ private static Map<String, RoleDescriptor> initializeReservedRoles() { "enrich_user", new String[] { "manage_enrich", "manage_ingest_pipelines", "monitor" }, new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices(".enrich-*").privileges("manage", "read", "write").build() }, + RoleDescriptor.IndicesPrivileges.builder() + .indices(".enrich-*") + .privileges("read", "view_index_metadata") + .allowRestrictedIndices(true) + .build(), + RoleDescriptor.IndicesPrivileges.builder().indices(".enrich-*").privileges("manage", "write").build() }, null, MetadataUtils.DEFAULT_RESERVED_METADATA ) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 41e89a4403d17..ce09af33e0bac 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -3417,6 +3417,24 @@ public void testIncludeReservedRoles() { ); } + public void testEnrichUserRole() { + final TransportRequest request = mock(TransportRequest.class); + final Authentication authentication = AuthenticationTestHelper.builder().build(); + + RoleDescriptor roleDescriptor = ReservedRolesStore.roleDescriptor("enrich_user"); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + + Role role = Role.buildFromRoleDescriptor(roleDescriptor, new FieldPermissionsCache(Settings.EMPTY), RESTRICTED_INDICES); + assertTrue(role.cluster().check("cluster:admin/xpack/enrich/put", request,
authentication)); + assertTrue(role.cluster().check("cluster:admin/xpack/enrich/execute", request, authentication)); + assertTrue(role.cluster().check("cluster:admin/xpack/enrich/esql/resolve", request, authentication)); + assertTrue(role.cluster().check("cluster:admin/xpack/enrich/esql/lookup", request, authentication)); + assertFalse(role.runAs().check(randomAlphaOfLengthBetween(1, 30))); + assertFalse(role.indices().allowedIndicesMatcher(IndexAction.NAME).test(mockIndexAbstraction("foo"))); + assertOnlyReadAllowed(role, ".enrich-foo"); + } + private IndexAbstraction mockIndexAbstraction(String name) { IndexAbstraction mock = mock(IndexAbstraction.class); when(mock.getName()).thenReturn(name); From 6eb4c8ead6d05a9cfb2d2225c5b2d0bea866ef71 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 11 Oct 2023 11:35:08 -0700 Subject: [PATCH 176/176] Ensure release vector builder in QuantileStates (#100693) Ensure that we always release the vector builders in case we hit the breaker in QuantileStates. --- .../compute/aggregation/QuantileStates.java | 77 ++++++++++--------- 1 file changed, 40 insertions(+), 37 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java index 9f4bfc58d0073..121e80871aaf0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java @@ -161,58 +161,61 @@ TDigestState getOrNull(int position) { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { assert blocks.length >= offset + 1; - var builder = BytesRefBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - for (int i = 0; i < selected.getPositionCount(); i++) { - int group = selected.getInt(i); - TDigestState state; - if (group < digests.size()) { - state = getOrNull(group); - if (state == null) { + try (var builder = BytesRefBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int group = selected.getInt(i); + TDigestState state; + if (group < digests.size()) { + state = getOrNull(group); + if (state == null) { + state = TDigestState.create(DEFAULT_COMPRESSION); + } + } else { state = TDigestState.create(DEFAULT_COMPRESSION); } - } else { - state = TDigestState.create(DEFAULT_COMPRESSION); + builder.appendBytesRef(serializeDigest(state)); } - builder.appendBytesRef(serializeDigest(state)); + blocks[offset] = builder.build(); } - blocks[offset] = builder.build(); } Block evaluateMedianAbsoluteDeviation(IntVector selected, DriverContext driverContext) { assert percentile == MEDIAN : "Median must be 50th percentile [percentile = " + percentile + "]"; - final DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - for (int i = 0; i < selected.getPositionCount(); i++) { - int si = selected.getInt(i); - if (si >= digests.size()) { - builder.appendNull(); - continue; - } - final TDigestState digest = digests.get(si); - if (digest != null && digest.size() > 0) { - builder.appendDouble(InternalMedianAbsoluteDeviation.computeMedianAbsoluteDeviation(digest)); - } else { - builder.appendNull(); + try (DoubleBlock.Builder builder = 
DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int si = selected.getInt(i); + if (si >= digests.size()) { + builder.appendNull(); + continue; + } + final TDigestState digest = digests.get(si); + if (digest != null && digest.size() > 0) { + builder.appendDouble(InternalMedianAbsoluteDeviation.computeMedianAbsoluteDeviation(digest)); + } else { + builder.appendNull(); + } } + return builder.build(); } - return builder.build(); } Block evaluatePercentile(IntVector selected, DriverContext driverContext) { - final DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - for (int i = 0; i < selected.getPositionCount(); i++) { - int si = selected.getInt(i); - if (si >= digests.size()) { - builder.appendNull(); - continue; - } - final TDigestState digest = digests.get(si); - if (percentile != null && digest != null && digest.size() > 0) { - builder.appendDouble(digest.quantile(percentile / 100)); - } else { - builder.appendNull(); + try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + for (int i = 0; i < selected.getPositionCount(); i++) { + int si = selected.getInt(i); + if (si >= digests.size()) { + builder.appendNull(); + continue; + } + final TDigestState digest = digests.get(si); + if (percentile != null && digest != null && digest.size() > 0) { + builder.appendDouble(digest.quantile(percentile / 100)); + } else { + builder.appendNull(); + } } + return builder.build(); } - return builder.build(); } @Override