From b285204fdee1ba831b85a02061dbee90858a170c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 8 Nov 2024 12:40:53 -0500 Subject: [PATCH 01/95] DOCS: Correct explanation of percentiles_bucket (#116499) Corrects the explanation of `percentiles_bucket` so it's clear that it always returns the `nth` largest item and rounds `n` toward positive infinity. That's how it's worked since 2016, but the docs talked about "not greater than", which I don't think is particularly clear. --- .../pipeline/percentiles-bucket-aggregation.asciidoc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc index 658470c8d5a4e..d5bd868258081 100644 --- a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc @@ -127,10 +127,11 @@ And the following may be the response: ==== Percentiles_bucket implementation -The Percentile Bucket returns the nearest input data point that is not greater than the requested percentile; it does not -interpolate between data points. - The percentiles are calculated exactly and are not an approximation (unlike the Percentiles Metric). This means the implementation maintains an in-memory, sorted list of your data to compute the percentiles, before discarding the data. You may run into memory pressure issues if you attempt to calculate percentiles over many millions of data-points in a single `percentiles_bucket`. + +The Percentile Bucket returns the nearest input data point to the requested percentile, rounding indices toward +positive infinity; it does not interpolate between data points. For example, if there are eight data points and +you request the `50th` percentile, it will return the `4th` item because `ROUND_UP(.50 * (8-1))` is `4`.
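To make the rounding rule concrete, here is a minimal sketch of the index computation described above (the `percentileBucket` helper name is assumed; this is not the aggregation's actual implementation):

[source,java]
----
// Sketch of the rule above: the index is percent/100 * (count - 1),
// rounded toward positive infinity, so there is never any interpolation.
// Hypothetical helper, not Elasticsearch code.
static double percentileBucket(double[] sortedValues, double percent) {
    int index = (int) Math.ceil((percent / 100.0) * (sortedValues.length - 1));
    return sortedValues[index];
}
----

With eight sorted values and `percent = 50`, `index` is `ceil(3.5) = 4`, matching the `4th` item in the corrected docs.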
From 8b3507138634906c293c2a018e306bcddb6d766c Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 8 Nov 2024 09:45:25 -0800 Subject: [PATCH 02/95] Add SLES 15.6 to docker linux exclusions list (#116506) --- .ci/dockerOnLinuxExclusions | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/dockerOnLinuxExclusions b/.ci/dockerOnLinuxExclusions index c150cca590f7d..715ed86188dd5 100644 --- a/.ci/dockerOnLinuxExclusions +++ b/.ci/dockerOnLinuxExclusions @@ -15,6 +15,7 @@ sles-15.2 sles-15.3 sles-15.4 sles-15.5 +sles-15.6 # These OSes are deprecated and filtered starting with 8.0.0, but need to be excluded # for PR checks From af99654dac95e55bc44b9bfe71393f5d8b48b0ef Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Fri, 8 Nov 2024 11:59:32 -0600 Subject: [PATCH 03/95] Add a monitor_stats privilege and allow that privilege for remote cluster privileges (#114964) This commit does the following: * Add a new monitor_stats privilege * Ensure that monitor_stats can be set in the remote_cluster privileges * Gives Kibana the ability to remotely call monitor_stats via RCS 2.0 Since this is the first case where there is more than 1 remote_cluster privilege, the following framework concerns have been added: * Ensure that when sending to older RCS 2.0 clusters we don't send the new privilege; previous versions only supported all-or-nothing remote_cluster blocks * Ensure that when we send API key role descriptors that contain remote_cluster, we don't send the new privileges to RCS 1.0/2.0 clusters that are not new enough * Fix and extend the BWC tests for RCS 1.0 and RCS 2.0 --- docs/changelog/114964.yaml | 6 + .../security/bulk-create-roles.asciidoc | 2 +- .../security/get-builtin-privileges.asciidoc | 4 +- .../org/elasticsearch/TransportVersions.java | 1 + x-pack/plugin/build.gradle | 1 + .../user/GetUserPrivilegesResponse.java | 2 +- .../core/security/authc/Authentication.java | 111 +++++++- .../core/security/authz/RoleDescriptor.java | 34 ++- .../RemoteClusterPermissionGroup.java | 39 ++- .../permission/RemoteClusterPermissions.java | 132 +++++++-- .../core/security/authz/permission/Role.java | 2 +- .../security/authz/permission/SimpleRole.java | 4 +- .../privilege/ClusterPrivilegeResolver.java | 12 +- .../KibanaOwnedReservedRoleDescriptors.java | 13 +- ...usterApiKeyRoleDescriptorBuilderTests.java | 45 ++- .../action/role/PutRoleRequestTests.java | 2 +- .../security/authc/AuthenticationTests.java | 86 +++++- .../security/authz/RoleDescriptorTests.java | 28 ++ .../RemoteClusterPermissionGroupTests.java | 15 +- .../RemoteClusterPermissionsTests.java | 141 ++++++++-- .../authz/store/ReservedRolesStoreTests.java | 2 +- .../security/qa/multi-cluster/build.gradle | 8 +- ...stractRemoteClusterSecurityBWCRestIT.java} | 168 +++++------ ...ClusterSecurityBWCToRCS1ClusterRestIT.java | 69 +++++ ...ClusterSecurityBWCToRCS2ClusterRestIT.java | 90 ++++++ .../RemoteClusterSecurityRestStatsIT.java | 266 ++++++++++++++++++ .../xpack/security/apikey/ApiKeyRestIT.java | 4 +- .../authz/store/CompositeRolesStore.java | 2 +- .../authz/store/RoleDescriptorStore.java | 2 +- .../xpack/security/authz/RBACEngineTests.java | 50 ++-- .../authz/store/CompositeRolesStoreTests.java | 10 +- .../authz/store/FileRolesStoreTests.java | 3 +- .../RestGetUserPrivilegesActionTests.java | 4 +- .../test/privileges/11_builtin.yml | 2 +- 34 files changed, 1156 insertions(+), 204 deletions(-) create mode 100644 docs/changelog/114964.yaml rename
x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/{RemoteClusterSecurityBwcRestIT.java => AbstractRemoteClusterSecurityBWCRestIT.java} (65%) create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS1ClusterRestIT.java create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS2ClusterRestIT.java create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestStatsIT.java diff --git a/docs/changelog/114964.yaml b/docs/changelog/114964.yaml new file mode 100644 index 0000000000000..8274aeb76a937 --- /dev/null +++ b/docs/changelog/114964.yaml @@ -0,0 +1,6 @@ +pr: 114964 +summary: Add a `monitor_stats` privilege and allow that privilege for remote cluster + privileges +area: Authorization +type: enhancement +issues: [] diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index a1fe998c08146..a198f49383907 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -327,7 +327,7 @@ The result would then have the `errors` field set to `true` and hold the error f "details": { "my_admin_role": { <4> "type": "action_request_validation_exception", - "reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,manage_data_stream_global_retention,monitor_data_stream_global_retention,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;" + "reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. 
a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,manage_data_stream_global_retention,monitor_data_stream_global_retention,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_stats,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;" } } } diff --git a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc index 8435f5539ab9d..7f3d75b926780 100644 --- a/docs/reference/rest-api/security/get-builtin-privileges.asciidoc +++ b/docs/reference/rest-api/security/get-builtin-privileges.asciidoc @@ -111,6 +111,7 @@ A successful call returns an object with "cluster", "index", and "remote_cluster "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -152,7 +153,8 @@ A successful call returns an object with "cluster", "index", and "remote_cluster "write" ], "remote_cluster" : [ - "monitor_enrich" + "monitor_enrich", + "monitor_stats" ] } -------------------------------------------------- diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 4edeacfa754c5..3134eb4966115 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -189,6 +189,7 @@ static TransportVersion def(int id) { public static final TransportVersion LOGSDB_TELEMETRY = def(8_784_00_0); public static final TransportVersion LOGSDB_TELEMETRY_STATS = def(8_785_00_0); public static final TransportVersion KQL_QUERY_ADDED = def(8_786_00_0); + public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 8b920ac11cee7..193a82436f26a 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -88,5 +88,6 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("esql/60_usage/Basic ESQL usage output (telemetry) non-snapshot version", "The number of functions is constantly increasing") task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.") task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.") + task.skipTest("privileges/11_builtin/Test get builtin privileges" ,"unnecessary to test compatibility") }) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java index de351cd59c690..763ab6ccb9886 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java @@ -115,7 +115,7 @@ public boolean hasRemoteIndicesPrivileges() { } public boolean hasRemoteClusterPrivileges() { - return remoteClusterPermissions.hasPrivileges(); + return remoteClusterPermissions.hasAnyPrivileges(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java index 04dda75692208..c2f40a3e393b9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.core.security.authc.service.ServiceAccountSettings; import org.elasticsearch.xpack.core.security.authc.support.AuthenticationContextSerializer; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.InternalUser; import org.elasticsearch.xpack.core.security.user.InternalUsers; @@ -76,6 +77,7 @@ import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.FALLBACK_REALM_NAME; import static org.elasticsearch.xpack.core.security.authc.AuthenticationField.FALLBACK_REALM_TYPE; import static org.elasticsearch.xpack.core.security.authc.RealmDomain.REALM_DOMAIN_PARSER; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.Fields.REMOTE_CLUSTER; import static org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions.ROLE_REMOTE_CLUSTER_PRIVS; /** @@ -233,8 +235,8 @@ public Authentication maybeRewriteForOlderVersion(TransportVersion olderVersion) + "]" ); } - final Map newMetadata = maybeRewriteMetadata(olderVersion, this); + final Authentication newAuthentication; if (isRunAs()) { // The lookup user for run-as currently doesn't have authentication metadata associated with them because @@ -272,12 +274,23 @@ public Authentication maybeRewriteForOlderVersion(TransportVersion olderVersion) } private static Map maybeRewriteMetadata(TransportVersion olderVersion, Authentication authentication) { - if 
(authentication.isAuthenticatedAsApiKey()) { - return maybeRewriteMetadataForApiKeyRoleDescriptors(olderVersion, authentication); - } else if (authentication.isCrossClusterAccess()) { - return maybeRewriteMetadataForCrossClusterAccessAuthentication(olderVersion, authentication); - } else { - return authentication.getAuthenticatingSubject().getMetadata(); + try { + if (authentication.isAuthenticatedAsApiKey()) { + return maybeRewriteMetadataForApiKeyRoleDescriptors(olderVersion, authentication); + } else if (authentication.isCrossClusterAccess()) { + return maybeRewriteMetadataForCrossClusterAccessAuthentication(olderVersion, authentication); + } else { + return authentication.getAuthenticatingSubject().getMetadata(); + } + } catch (Exception e) { + // CCS workflows may swallow the exception message making this difficult to troubleshoot, so we explicitly log and re-throw + // here. It may result in duplicate logs, so we only log the message at warn level. + if (logger.isDebugEnabled()) { + logger.debug("Un-expected exception thrown while rewriting metadata. This is likely a bug.", e); + } else { + logger.warn("Un-expected exception thrown while rewriting metadata. This is likely a bug [" + e.getMessage() + "]"); + } + throw e; } } @@ -1323,6 +1336,7 @@ private static Map maybeRewriteMetadataForApiKeyRoleDescriptors( if (authentication.getEffectiveSubject().getTransportVersion().onOrAfter(ROLE_REMOTE_CLUSTER_PRIVS) && streamVersion.before(ROLE_REMOTE_CLUSTER_PRIVS)) { + // the authentication understands the remote_cluster field but the stream does not metadata = new HashMap<>(metadata); metadata.put( AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY, @@ -1336,7 +1350,26 @@ private static Map maybeRewriteMetadataForApiKeyRoleDescriptors( (BytesReference) metadata.get(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY) ) ); - } + } else if (authentication.getEffectiveSubject().getTransportVersion().onOrAfter(ROLE_REMOTE_CLUSTER_PRIVS) + && streamVersion.onOrAfter(ROLE_REMOTE_CLUSTER_PRIVS)) { + // both the authentication object and the stream understand the remote_cluster field + // check each individual permission and remove as needed + metadata = new HashMap<>(metadata); + metadata.put( + AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY, + maybeRemoveRemoteClusterPrivilegesFromRoleDescriptors( + (BytesReference) metadata.get(AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY), + streamVersion + ) + ); + metadata.put( + AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, + maybeRemoveRemoteClusterPrivilegesFromRoleDescriptors( + (BytesReference) metadata.get(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY), + streamVersion + ) + ); + } if (authentication.getEffectiveSubject().getTransportVersion().onOrAfter(VERSION_API_KEY_ROLES_AS_BYTES) && streamVersion.before(VERSION_API_KEY_ROLES_AS_BYTES)) { @@ -1417,7 +1450,7 @@ private static BytesReference convertRoleDescriptorsMapToBytes(Map roleDescriptorsMap = convertRoleDescriptorsBytesToMap(roleDescriptorsBytes); + final Map roleDescriptorsMapMutated = new HashMap<>(roleDescriptorsMap); + final AtomicBoolean modified = new AtomicBoolean(false); + roleDescriptorsMap.forEach((key, value) -> { + if (value instanceof Map) { + Map roleDescriptor = (Map) value; + roleDescriptor.forEach((innerKey, innerValue) -> { + // example: remote_cluster=[{privileges=[monitor_enrich, monitor_stats] + if (REMOTE_CLUSTER.getPreferredName().equals(innerKey)) { + assert innerValue instanceof List; + RemoteClusterPermissions 
discoveredRemoteClusterPermission = new RemoteClusterPermissions( + (List>>) innerValue + ); + RemoteClusterPermissions mutated = discoveredRemoteClusterPermission.removeUnsupportedPrivileges(outboundVersion); + if (mutated.equals(discoveredRemoteClusterPermission) == false) { + // swap out the old value with the new value + modified.set(true); + Map remoteClusterMap = new HashMap<>((Map) roleDescriptorsMapMutated.get(key)); + if (mutated.hasAnyPrivileges()) { + // has at least one group with privileges + remoteClusterMap.put(innerKey, mutated.toMap()); + } else { + // has no groups with privileges + remoteClusterMap.remove(innerKey); + } + roleDescriptorsMapMutated.put(key, remoteClusterMap); + } + } + }); + } + }); + if (modified.get()) { + logger.debug( + "mutated role descriptors. Changed from {} to {} for outbound version {}", + roleDescriptorsMap, + roleDescriptorsMapMutated, + outboundVersion + ); + return convertRoleDescriptorsMapToBytes(roleDescriptorsMapMutated); + } else { + // No need to serialize if we did not change anything. + logger.trace("no change to role descriptors {} for outbound version {}", roleDescriptorsMap, outboundVersion); + return roleDescriptorsBytes; + } + } + static boolean equivalentRealms(String name1, String type1, String name2, String type2) { if (false == type1.equals(type2)) { return false; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java index 8d069caf0496f..9f5aaa8562a88 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.core.security.authz; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; @@ -62,6 +64,7 @@ public class RoleDescriptor implements ToXContentObject, Writeable { public static final TransportVersion SECURITY_ROLE_DESCRIPTION = TransportVersions.V_8_15_0; public static final String ROLE_TYPE = "role"; + private static final Logger logger = LogManager.getLogger(RoleDescriptor.class); private final String name; private final String[] clusterPrivileges; @@ -191,7 +194,7 @@ public RoleDescriptor( ? Collections.unmodifiableMap(transientMetadata) : Collections.singletonMap("enabled", true); this.remoteIndicesPrivileges = remoteIndicesPrivileges != null ? remoteIndicesPrivileges : RemoteIndicesPrivileges.NONE; - this.remoteClusterPermissions = remoteClusterPermissions != null && remoteClusterPermissions.hasPrivileges() + this.remoteClusterPermissions = remoteClusterPermissions != null && remoteClusterPermissions.hasAnyPrivileges() ? remoteClusterPermissions : RemoteClusterPermissions.NONE; this.restriction = restriction != null ? 
restriction : Restriction.NONE; @@ -263,7 +266,7 @@ public boolean hasRemoteIndicesPrivileges() { } public boolean hasRemoteClusterPermissions() { - return remoteClusterPermissions.hasPrivileges(); + return remoteClusterPermissions.hasAnyPrivileges(); } public RemoteClusterPermissions getRemoteClusterPermissions() { @@ -830,25 +833,32 @@ private static RemoteClusterPermissions parseRemoteCluster(final String roleName currentFieldName = parser.currentName(); } else if (Fields.PRIVILEGES.match(currentFieldName, parser.getDeprecationHandler())) { privileges = readStringArray(roleName, parser, false); - if (privileges.length != 1 - || RemoteClusterPermissions.getSupportedRemoteClusterPermissions() - .contains(privileges[0].trim().toLowerCase(Locale.ROOT)) == false) { - throw new ElasticsearchParseException( - "failed to parse remote_cluster for role [{}]. " - + RemoteClusterPermissions.getSupportedRemoteClusterPermissions() - + " is the only value allowed for [{}] within [remote_cluster]", + if (Arrays.stream(privileges) + .map(s -> s.toLowerCase(Locale.ROOT).trim()) + .allMatch(RemoteClusterPermissions.getSupportedRemoteClusterPermissions()::contains) == false) { + final String message = String.format( + Locale.ROOT, + "failed to parse remote_cluster for role [%s]. " + + "%s are the only values allowed for [%s] within [remote_cluster]. Found %s", roleName, - currentFieldName + RemoteClusterPermissions.getSupportedRemoteClusterPermissions(), + currentFieldName, + Arrays.toString(privileges) ); + logger.info(message); + throw new ElasticsearchParseException(message); } } else if (Fields.CLUSTERS.match(currentFieldName, parser.getDeprecationHandler())) { clusters = readStringArray(roleName, parser, false); } else { - throw new ElasticsearchParseException( - "failed to parse remote_cluster for role [{}]. unexpected field [{}]", + final String message = String.format( + Locale.ROOT, + "failed to parse remote_cluster for role [%s]. unexpected field [%s]", roleName, currentFieldName ); + logger.info(message); + throw new ElasticsearchParseException(message); } } if (privileges != null && clusters == null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroup.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroup.java index 1c34a7829fcbb..ec245fae28612 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroup.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroup.java @@ -13,11 +13,15 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.support.StringMatcher; import java.io.IOException; import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.Fields.CLUSTERS; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.Fields.PRIVILEGES; /** * Represents a group of permissions for a remote cluster. 
For example: @@ -41,6 +45,14 @@ public RemoteClusterPermissionGroup(StreamInput in) throws IOException { remoteClusterAliasMatcher = StringMatcher.of(remoteClusterAliases); } + public RemoteClusterPermissionGroup(Map> remoteClusterGroup) { + assert remoteClusterGroup.get(PRIVILEGES.getPreferredName()) != null : "privileges must be non-null"; + assert remoteClusterGroup.get(CLUSTERS.getPreferredName()) != null : "clusters must be non-null"; + clusterPrivileges = remoteClusterGroup.get(PRIVILEGES.getPreferredName()).toArray(new String[0]); + remoteClusterAliases = remoteClusterGroup.get(CLUSTERS.getPreferredName()).toArray(new String[0]); + remoteClusterAliasMatcher = StringMatcher.of(remoteClusterAliases); + } + /** * @param clusterPrivileges The list of cluster privileges that are allowed for the remote cluster. must not be null or empty. * @param remoteClusterAliases The list of remote clusters that the privileges apply to. must not be null or empty. @@ -53,10 +65,14 @@ public RemoteClusterPermissionGroup(String[] clusterPrivileges, String[] remoteC throw new IllegalArgumentException("remote cluster groups must not be null or empty"); } if (Arrays.stream(clusterPrivileges).anyMatch(s -> Strings.hasText(s) == false)) { - throw new IllegalArgumentException("remote_cluster privileges must contain valid non-empty, non-null values"); + throw new IllegalArgumentException( + "remote_cluster privileges must contain valid non-empty, non-null values " + Arrays.toString(clusterPrivileges) + ); } if (Arrays.stream(remoteClusterAliases).anyMatch(s -> Strings.hasText(s) == false)) { - throw new IllegalArgumentException("remote_cluster clusters aliases must contain valid non-empty, non-null values"); + throw new IllegalArgumentException( + "remote_cluster clusters aliases must contain valid non-empty, non-null values " + Arrays.toString(remoteClusterAliases) + ); } this.clusterPrivileges = clusterPrivileges; @@ -86,11 +102,24 @@ public String[] remoteClusterAliases() { return Arrays.copyOf(remoteClusterAliases, remoteClusterAliases.length); } + /** + * Converts the group to a map representation. + * @return A map representation of the group. 
+ */ + public Map> toMap() { + return Map.of( + PRIVILEGES.getPreferredName(), + Arrays.asList(clusterPrivileges), + CLUSTERS.getPreferredName(), + Arrays.asList(remoteClusterAliases) + ); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.array(RoleDescriptor.Fields.PRIVILEGES.getPreferredName(), clusterPrivileges); - builder.array(RoleDescriptor.Fields.CLUSTERS.getPreferredName(), remoteClusterAliases); + builder.array(PRIVILEGES.getPreferredName(), clusterPrivileges); + builder.array(CLUSTERS.getPreferredName(), remoteClusterAliases); builder.endObject(); return builder; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissions.java index 0d8880c33720b..1928cf117dde3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissions.java @@ -29,13 +29,19 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.TreeSet; import java.util.stream.Collectors; +import static org.elasticsearch.TransportVersions.ROLE_MONITOR_STATS; + /** * Represents the set of permissions for remote clusters. This is intended to be the model for both the {@link RoleDescriptor} - * and {@link Role}. This model is not intended to be sent to a remote cluster, but can be (wire) serialized within a single cluster - * as well as the Xcontent serialization for the REST API and persistence of the role in the security index. The privileges modeled here - * will be converted to the appropriate cluster privileges when sent to a remote cluster. + * and {@link Role}. This model is intended to be converted to local cluster permissions + * {@link #collapseAndRemoveUnsupportedPrivileges(String, TransportVersion)} before being sent to the remote cluster. This model can also be + * included in the role descriptors for (normal) API keys sent between nodes/clusters. In both cases the outbound transport version can be + * used to remove permissions that are not available to older nodes or clusters. The methods {@link #removeUnsupportedPrivileges(TransportVersion)} + * and {@link #collapseAndRemoveUnsupportedPrivileges(String, TransportVersion)} are used to aid in ensuring correct privileges per + * transport version. * For example, on the local/querying cluster this model represents the following: * * "remote_cluster" : [ @@ -49,15 +55,18 @@ * } * ] * - * when sent to the remote cluster "clusterA", the privileges will be converted to the appropriate cluster privileges. For example: + * (RCS 2.0) when sent to the remote cluster "clusterA", the privileges will be converted to the appropriate cluster privileges. + * For example: * * "cluster": ["foo"] * - * and when sent to the remote cluster "clusterB", the privileges will be converted to the appropriate cluster privileges. For example: + * and (RCS 2.0) when sent to the remote cluster "clusterB", the privileges will be converted to the appropriate cluster privileges. + * For example: * * "cluster": ["bar"] * - * If the remote cluster does not support the privilege, as determined by the remote cluster version, the privilege will be not be sent.
+ * For normal API keys and their role descriptors: if the remote cluster does not support the privilege, the privilege will not be sent. + * Upstream code performs the removal, but this class owns the business logic for how to remove per outbound version. */ public class RemoteClusterPermissions implements NamedWriteable, ToXContentObject { @@ -70,19 +79,33 @@ public class RemoteClusterPermissions implements NamedWriteable, ToXContentObjec // package private non-final for testing static Map> allowedRemoteClusterPermissions = Map.of( ROLE_REMOTE_CLUSTER_PRIVS, - Set.of(ClusterPrivilegeResolver.MONITOR_ENRICH.name()) + Set.of(ClusterPrivilegeResolver.MONITOR_ENRICH.name()), + ROLE_MONITOR_STATS, + Set.of(ClusterPrivilegeResolver.MONITOR_STATS.name()) ); + static final TransportVersion lastTransportVersionPermission = allowedRemoteClusterPermissions.keySet() + .stream() + .max(TransportVersion::compareTo) + .orElseThrow(); public static final RemoteClusterPermissions NONE = new RemoteClusterPermissions(); public static Set getSupportedRemoteClusterPermissions() { - return allowedRemoteClusterPermissions.values().stream().flatMap(Set::stream).collect(Collectors.toSet()); + return allowedRemoteClusterPermissions.values().stream().flatMap(Set::stream).collect(Collectors.toCollection(TreeSet::new)); } public RemoteClusterPermissions(StreamInput in) throws IOException { remoteClusterPermissionGroups = in.readNamedWriteableCollectionAsList(RemoteClusterPermissionGroup.class); } + public RemoteClusterPermissions(List>> remoteClusters) { + remoteClusterPermissionGroups = new ArrayList<>(); + for (Map> remoteCluster : remoteClusters) { + RemoteClusterPermissionGroup remoteClusterPermissionGroup = new RemoteClusterPermissionGroup(remoteCluster); + remoteClusterPermissionGroups.add(remoteClusterPermissionGroup); + } + } + public RemoteClusterPermissions() { remoteClusterPermissionGroups = new ArrayList<>(); } @@ -97,10 +120,64 @@ public RemoteClusterPermissions addGroup(RemoteClusterPermissionGroup remoteClus } /** - * Gets the privilege names for the remote cluster. This method will collapse all groups to single String[] all lowercase - * and will only return the appropriate privileges for the provided remote cluster version. + * Will remove any unsupported privileges for the provided outbound version. This method will not modify the current instance. + * This is useful for (normal) API key role descriptors to help ensure that we don't send unsupported privileges. This method + * may result in no groups if all privileges are removed. {@link #hasAnyPrivileges()} can be used to check if there are + * any privileges left.
+ * @param outboundVersion The version by which to remove unsupported privileges; this is typically the version of the remote cluster + * @return a new instance of RemoteClusterPermissions with the unsupported privileges removed */ - public String[] privilegeNames(final String remoteClusterAlias, TransportVersion remoteClusterVersion) { + public RemoteClusterPermissions removeUnsupportedPrivileges(TransportVersion outboundVersion) { + Objects.requireNonNull(outboundVersion, "outboundVersion must not be null"); + if (outboundVersion.onOrAfter(lastTransportVersionPermission)) { + return this; + } + RemoteClusterPermissions copyForOutboundVersion = new RemoteClusterPermissions(); + Set allowedPermissionsPerVersion = getAllowedPermissionsPerVersion(outboundVersion); + for (RemoteClusterPermissionGroup group : remoteClusterPermissionGroups) { + String[] privileges = group.clusterPrivileges(); + List outboundPrivileges = new ArrayList<>(privileges.length); + for (String privilege : privileges) { + if (allowedPermissionsPerVersion.contains(privilege.toLowerCase(Locale.ROOT))) { + outboundPrivileges.add(privilege); + } + } + if (outboundPrivileges.isEmpty() == false) { + RemoteClusterPermissionGroup outboundGroup = new RemoteClusterPermissionGroup( + outboundPrivileges.toArray(new String[0]), + group.remoteClusterAliases() + ); + copyForOutboundVersion.addGroup(outboundGroup); + if (logger.isDebugEnabled()) { + if (group.equals(outboundGroup) == false) { + logger.debug( + "Removed unsupported remote cluster permissions. Remaining {} for remote cluster [{}] for version [{}]." + + "Due to the remote cluster version, only the following permissions are allowed: {}", + outboundPrivileges, + group.remoteClusterAliases(), + outboundVersion, + allowedPermissionsPerVersion + ); + } + } + } else { + logger.debug( + "Removed all remote cluster permissions for remote cluster [{}]. " + + "Due to the remote cluster version, only the following permissions are allowed: {}", + group.remoteClusterAliases(), + allowedPermissionsPerVersion + ); + } + } + return copyForOutboundVersion; + } + + /** + * Gets all the privilege names for the remote cluster. This method will collapse all groups into a single, all-lowercase String[] + * and will only return the appropriate privileges for the provided remote cluster version. This is useful for RCS 2.0 to ensure + * that we properly convert all the remote_cluster -> cluster privileges per remote cluster.
+ */ + public String[] collapseAndRemoveUnsupportedPrivileges(final String remoteClusterAlias, TransportVersion outboundVersion) { // get all privileges for the remote cluster Set groupPrivileges = remoteClusterPermissionGroups.stream() @@ -111,13 +188,7 @@ public String[] privilegeNames(final String remoteClusterAlias, TransportVersion .collect(Collectors.toSet()); // find all the privileges that are allowed for the remote cluster version - Set allowedPermissionsPerVersion = allowedRemoteClusterPermissions.entrySet() - .stream() - .filter((entry) -> entry.getKey().onOrBefore(remoteClusterVersion)) - .map(Map.Entry::getValue) - .flatMap(Set::stream) - .map(s -> s.toLowerCase(Locale.ROOT)) - .collect(Collectors.toSet()); + Set allowedPermissionsPerVersion = getAllowedPermissionsPerVersion(outboundVersion); // intersect the two sets to get the allowed privileges for the remote cluster version Set allowedPrivileges = new HashSet<>(groupPrivileges); @@ -137,13 +208,21 @@ public String[] privilegeNames(final String remoteClusterAlias, TransportVersion return allowedPrivileges.stream().sorted().toArray(String[]::new); } + /** + * Converts this object to its {@link Map} representation. + * @return a list of maps representing the remote cluster permissions + */ + public List>> toMap() { + return remoteClusterPermissionGroups.stream().map(RemoteClusterPermissionGroup::toMap).toList(); + } + /** * Validates the remote cluster permissions (regardless of remote cluster version). * This method will throw an {@link IllegalArgumentException} if the permissions are invalid. * Generally, this method is just a safety check and validity should be checked before adding the permissions to this class. */ public void validate() { - assert hasPrivileges(); + assert hasAnyPrivileges(); Set invalid = getUnsupportedPrivileges(); if (invalid.isEmpty() == false) { throw new IllegalArgumentException( @@ -173,11 +252,11 @@ private Set getUnsupportedPrivileges() { return invalid; } - public boolean hasPrivileges(final String remoteClusterAlias) { + public boolean hasAnyPrivileges(final String remoteClusterAlias) { return remoteClusterPermissionGroups.stream().anyMatch(remoteIndicesGroup -> remoteIndicesGroup.hasPrivileges(remoteClusterAlias)); } - public boolean hasPrivileges() { + public boolean hasAnyPrivileges() { return remoteClusterPermissionGroups.isEmpty() == false; } @@ -185,6 +264,16 @@ public List groups() { return Collections.unmodifiableList(remoteClusterPermissionGroups); } + private Set getAllowedPermissionsPerVersion(TransportVersion outboundVersion) { + return allowedRemoteClusterPermissions.entrySet() + .stream() + .filter((entry) -> entry.getKey().onOrBefore(outboundVersion)) + .map(Map.Entry::getValue) + .flatMap(Set::stream) + .map(s -> s.toLowerCase(Locale.ROOT)) + .collect(Collectors.toSet()); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { for (RemoteClusterPermissionGroup remoteClusterPermissionGroup : remoteClusterPermissionGroups) { @@ -220,4 +309,5 @@ public String toString() { public String getWriteableName() { return NAME; } + }
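The version gating done by `removeUnsupportedPrivileges` and `getAllowedPermissionsPerVersion` above boils down to one rule, sketched here as a self-contained toy (not the class above, which also preserves the group/alias structure and logging). Plain ints stand in for TransportVersion ids; the monitor_enrich id is assumed, while the monitor_stats id (8_787_00_0) comes from the TransportVersions hunk earlier in this patch:

[source,java]
----
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;

// Hypothetical, condensed model of the rule implemented by
// RemoteClusterPermissions: a privilege is kept for an outbound stream only
// if the version that introduced it is on or before the outbound version.
class VersionGatedPrivileges {
    static final Map<Integer, Set<String>> INTRODUCED_AT = Map.of(
        8_633_00_0, Set.of("monitor_enrich"), // ROLE_REMOTE_CLUSTER_PRIVS (assumed id)
        8_787_00_0, Set.of("monitor_stats")   // ROLE_MONITOR_STATS
    );

    static Set<String> allowedFor(int outboundVersion) {
        Set<String> allowed = new TreeSet<>(); // sorted, like getSupportedRemoteClusterPermissions()
        INTRODUCED_AT.forEach((introducedAt, privileges) -> {
            if (introducedAt <= outboundVersion) {
                allowed.addAll(privileges); // the receiving side understands these
            }
        });
        return allowed;
    }
}
----

For an outbound version of `8_786_00_0` (the `KQL_QUERY_ADDED` id above), `allowedFor` yields only `monitor_enrich`, which is why the rewrite logic strips `monitor_stats` from API key role descriptors headed to older nodes and drops a `remote_cluster` group entirely once it has no privileges left.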
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index d8d56a4fbb247..f52f8f85f006d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -283,7 +283,7 @@ public Builder addRemoteIndicesGroup( public Builder addRemoteClusterPermissions(RemoteClusterPermissions remoteClusterPermissions) { Objects.requireNonNull(remoteClusterPermissions, "remoteClusterPermissions must not be null"); assert this.remoteClusterPermissions == null : "addRemoteClusterPermissions should only be called once"; - if (remoteClusterPermissions.hasPrivileges()) { + if (remoteClusterPermissions.hasAnyPrivileges()) { remoteClusterPermissions.validate(); } this.remoteClusterPermissions = remoteClusterPermissions; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java index 08c86c5f71f4f..0ec9d2a48316a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/SimpleRole.java @@ -210,7 +210,7 @@ public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluste final RemoteIndicesPermission remoteIndicesPermission = this.remoteIndicesPermission.forCluster(remoteClusterAlias); if (remoteIndicesPermission.remoteIndicesGroups().isEmpty() - && remoteClusterPermissions.hasPrivileges(remoteClusterAlias) == false) { + && remoteClusterPermissions.hasAnyPrivileges(remoteClusterAlias) == false) { return RoleDescriptorsIntersection.EMPTY; } @@ -224,7 +224,7 @@ public RoleDescriptorsIntersection getRoleDescriptorsIntersectionForRemoteCluste return new RoleDescriptorsIntersection( new RoleDescriptor( REMOTE_USER_ROLE_NAME, - remoteClusterPermissions.privilegeNames(remoteClusterAlias, remoteClusterVersion), + remoteClusterPermissions.collapseAndRemoveUnsupportedPrivileges(remoteClusterAlias, remoteClusterVersion), // The role descriptors constructed here may be cached in raw byte form, using a hash of their content as a // cache key; we therefore need deterministic order when constructing them here, to ensure cache hits for // equivalent role descriptors diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java index 3d1b378f4f51e..00d45fb135fb2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java @@ -110,6 +110,8 @@ public class ClusterPrivilegeResolver { private static final Set MONITOR_WATCHER_PATTERN = Set.of("cluster:monitor/xpack/watcher/*"); private static final Set MONITOR_ROLLUP_PATTERN = Set.of("cluster:monitor/xpack/rollup/*"); private static final Set MONITOR_ENRICH_PATTERN = Set.of("cluster:monitor/xpack/enrich/*", "cluster:admin/xpack/enrich/get"); + // intentionally cluster:monitor/stats* to match cluster:monitor/stats, cluster:monitor/stats[n] and cluster:monitor/stats/remote + private static final Set MONITOR_STATS_PATTERN = Set.of("cluster:monitor/stats*"); private static final Set ALL_CLUSTER_PATTERN = Set.of( "cluster:*", @@ -208,7 +210,11 @@ public class ClusterPrivilegeResolver { // esql enrich
"cluster:monitor/xpack/enrich/esql/resolve_policy", "cluster:internal:data/read/esql/open_exchange", - "cluster:internal:data/read/esql/exchange" + "cluster:internal:data/read/esql/exchange", + // cluster stats for remote clusters + "cluster:monitor/stats/remote", + "cluster:monitor/stats", + "cluster:monitor/stats[n]" ); private static final Set CROSS_CLUSTER_REPLICATION_PATTERN = Set.of( RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME, @@ -243,6 +249,7 @@ public class ClusterPrivilegeResolver { public static final NamedClusterPrivilege MONITOR_WATCHER = new ActionClusterPrivilege("monitor_watcher", MONITOR_WATCHER_PATTERN); public static final NamedClusterPrivilege MONITOR_ROLLUP = new ActionClusterPrivilege("monitor_rollup", MONITOR_ROLLUP_PATTERN); public static final NamedClusterPrivilege MONITOR_ENRICH = new ActionClusterPrivilege("monitor_enrich", MONITOR_ENRICH_PATTERN); + public static final NamedClusterPrivilege MONITOR_STATS = new ActionClusterPrivilege("monitor_stats", MONITOR_STATS_PATTERN); public static final NamedClusterPrivilege MANAGE = new ActionClusterPrivilege("manage", ALL_CLUSTER_PATTERN, ALL_SECURITY_PATTERN); public static final NamedClusterPrivilege MANAGE_INFERENCE = new ActionClusterPrivilege("manage_inference", MANAGE_INFERENCE_PATTERN); public static final NamedClusterPrivilege MANAGE_ML = new ActionClusterPrivilege("manage_ml", MANAGE_ML_PATTERN); @@ -424,6 +431,7 @@ public class ClusterPrivilegeResolver { MONITOR_WATCHER, MONITOR_ROLLUP, MONITOR_ENRICH, + MONITOR_STATS, MANAGE, MANAGE_CONNECTOR, MANAGE_INFERENCE, @@ -499,7 +507,7 @@ public static NamedClusterPrivilege resolve(String name) { + Strings.collectionToCommaDelimitedString(VALUES.keySet()) + "] or a pattern over one of the available " + "cluster actions"; - logger.debug(errorMessage); + logger.warn(errorMessage); throw new IllegalArgumentException(errorMessage); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 5fb753ab55aab..259e66f633bac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -20,6 +20,9 @@ import org.elasticsearch.xpack.core.security.action.profile.SuggestProfilesAction; import org.elasticsearch.xpack.core.security.action.user.ProfileHasPrivilegesAction; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -497,7 +500,15 @@ static RoleDescriptor kibanaSystem(String name) { getRemoteIndicesReadPrivileges("metrics-apm.*"), getRemoteIndicesReadPrivileges("traces-apm.*"), getRemoteIndicesReadPrivileges("traces-apm-*") }, - null, + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup( + 
RemoteClusterPermissions.getSupportedRemoteClusterPermissions() + .stream() + .filter(s -> s.equals(ClusterPrivilegeResolver.MONITOR_STATS.name())) + .toArray(String[]::new), + new String[] { "*" } + ) + ), null, "Grants access necessary for the Kibana system user to read from and write to the Kibana indices, " + "manage index templates and tokens, and check the availability of the Elasticsearch cluster. " diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java index 22590e155e642..1dfd68ea95485 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilderTests.java @@ -10,11 +10,16 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import java.io.IOException; import java.util.List; @@ -27,6 +32,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class CrossClusterApiKeyRoleDescriptorBuilderTests extends ESTestCase { @@ -356,9 +362,42 @@ public void testEmptyAccessIsNotAllowed() throws IOException { } public void testAPIKeyAllowsAllRemoteClusterPrivilegesForCCS() { - // if users can add remote cluster permissions to a role, then the APIKey should also allow that for that permission - // the inverse however, is not guaranteed. 
cross_cluster_search exists largely for internal use and is not exposed to the users role - assertTrue(Set.of(CCS_CLUSTER_PRIVILEGE_NAMES).containsAll(RemoteClusterPermissions.getSupportedRemoteClusterPermissions())); + // test to help ensure that at least 1 action allowed by each remote cluster permission is supported by CCS + List actionsToTest = List.of("cluster:monitor/xpack/enrich/esql/resolve_policy", "cluster:monitor/stats/remote"); + // if you add new remote cluster permissions, please define an action we can test to help ensure it is supported by RCS 2.0 + assertThat(actionsToTest.size(), equalTo(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().size())); + + for (String privilege : RemoteClusterPermissions.getSupportedRemoteClusterPermissions()) { + boolean actionPassesRemoteClusterPermissionCheck = false; + ClusterPrivilege clusterPrivilege = ClusterPrivilegeResolver.resolve(privilege); + // each remote cluster privilege has an action to test + for (String action : actionsToTest) { + if (clusterPrivilege.buildPermission(ClusterPermission.builder()) + .build() + .check(action, mock(TransportRequest.class), AuthenticationTestHelper.builder().build())) { + actionPassesRemoteClusterPermissionCheck = true; + break; + } + } + assertTrue( + "privilege [" + privilege + "] does not cover any actions among [" + actionsToTest + "]", + actionPassesRemoteClusterPermissionCheck + ); + } + // test that the actions pass the privilege check for CCS + for (String privilege : Set.of(CCS_CLUSTER_PRIVILEGE_NAMES)) { + boolean actionPassesRemoteCCSCheck = false; + ClusterPrivilege clusterPrivilege = ClusterPrivilegeResolver.resolve(privilege); + for (String action : actionsToTest) { + if (clusterPrivilege.buildPermission(ClusterPermission.builder()) + .build() + .check(action, mock(TransportRequest.class), AuthenticationTestHelper.builder().build())) { + actionPassesRemoteCCSCheck = true; + break; + } + } + assertTrue(actionPassesRemoteCCSCheck); + } } private static void assertRoleDescriptor( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index 97255502bc7be..239d48ca9c2e1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -104,7 +104,7 @@ public void testValidationErrorWithUnknownRemoteClusterPrivilegeName() { } request.putRemoteCluster(remoteClusterPermissions); assertValidationError("Invalid remote_cluster permissions found.
Please remove the following: [", request); - assertValidationError("Only [monitor_enrich] are allowed", request); + assertValidationError("Only [monitor_enrich, monitor_stats] are allowed", request); } public void testValidationErrorWithEmptyClustersInRemoteIndices() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java index 66e246d1c8a50..c999c970a76da 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.transport.RemoteClusterPortSettings; +import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -32,6 +33,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptorsIntersection; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.User; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Arrays; @@ -42,6 +44,8 @@ import java.util.stream.Collectors; import static java.util.Map.entry; +import static org.elasticsearch.TransportVersions.ROLE_MONITOR_STATS; +import static org.elasticsearch.xpack.core.security.authc.Authentication.VERSION_API_KEY_ROLES_AS_BYTES; import static org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper.randomCrossClusterAccessSubjectInfo; import static org.elasticsearch.xpack.core.security.authc.CrossClusterAccessSubjectInfoTests.randomRoleDescriptorsIntersection; import static org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions.ROLE_REMOTE_CLUSTER_PRIVS; @@ -1070,7 +1074,7 @@ public void testMaybeRewriteMetadataForApiKeyRoleDescriptorsWithRemoteIndices() // pick a version before that of the authentication instance to force a rewrite final TransportVersion olderVersion = TransportVersionUtils.randomVersionBetween( random(), - Authentication.VERSION_API_KEY_ROLES_AS_BYTES, + VERSION_API_KEY_ROLES_AS_BYTES, TransportVersionUtils.getPreviousVersion(original.getEffectiveSubject().getTransportVersion()) ); @@ -1115,7 +1119,7 @@ public void testMaybeRewriteMetadataForApiKeyRoleDescriptorsWithRemoteCluster() // pick a version before that of the authentication instance to force a rewrite final TransportVersion olderVersion = TransportVersionUtils.randomVersionBetween( random(), - Authentication.VERSION_API_KEY_ROLES_AS_BYTES, + VERSION_API_KEY_ROLES_AS_BYTES, TransportVersionUtils.getPreviousVersion(original.getEffectiveSubject().getTransportVersion()) ); @@ -1135,6 +1139,84 @@ public void testMaybeRewriteMetadataForApiKeyRoleDescriptorsWithRemoteCluster() ); } + public void testMaybeRewriteMetadataForApiKeyRoleDescriptorsWithRemoteClusterRemovePrivs() throws IOException { + final String apiKeyId = randomAlphaOfLengthBetween(1, 10); + final String apiKeyName = randomAlphaOfLengthBetween(1, 10); + Map metadata = Map.ofEntries( + entry(AuthenticationField.API_KEY_ID_KEY, apiKeyId), + entry(AuthenticationField.API_KEY_NAME_KEY, apiKeyName), + entry(AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY, new BytesArray(""" + 
{"base_role":{"cluster":["all"], + "remote_cluster":[{"privileges":["monitor_enrich", "monitor_stats"],"clusters":["*"]}] + }}""")), + entry(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, new BytesArray(""" + {"limited_by_role":{"cluster":["*"], + "remote_cluster":[{"privileges":["monitor_enrich", "monitor_stats"],"clusters":["*"]}] + }}""")) + ); + + final Authentication with2privs = AuthenticationTestHelper.builder() + .apiKey() + .metadata(metadata) + .transportVersion(TransportVersion.current()) + .build(); + + // pick a version that will only remove one of the two privileges + final TransportVersion olderVersion = TransportVersionUtils.randomVersionBetween( + random(), + ROLE_REMOTE_CLUSTER_PRIVS, + TransportVersionUtils.getPreviousVersion(ROLE_MONITOR_STATS) + ); + + Map rewrittenMetadata = with2privs.maybeRewriteForOlderVersion(olderVersion).getEffectiveSubject().getMetadata(); + assertThat(rewrittenMetadata.keySet(), equalTo(with2privs.getAuthenticatingSubject().getMetadata().keySet())); + + // only one of the two privileges is left after the rewrite + BytesReference baseRoleBytes = (BytesReference) rewrittenMetadata.get(AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY); + Map baseRoleAsMap = XContentHelper.convertToMap(baseRoleBytes, false, XContentType.JSON).v2(); + assertThat(ObjectPath.eval("base_role.remote_cluster.0.privileges", baseRoleAsMap), Matchers.contains("monitor_enrich")); + assertThat(ObjectPath.eval("base_role.remote_cluster.0.clusters", baseRoleAsMap), notNullValue()); + BytesReference limitedByRoleBytes = (BytesReference) rewrittenMetadata.get( + AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY + ); + Map limitedByRoleAsMap = XContentHelper.convertToMap(limitedByRoleBytes, false, XContentType.JSON).v2(); + assertThat(ObjectPath.eval("limited_by_role.remote_cluster.0.privileges", limitedByRoleAsMap), Matchers.contains("monitor_enrich")); + assertThat(ObjectPath.eval("limited_by_role.remote_cluster.0.clusters", limitedByRoleAsMap), notNullValue()); + + // same version, but it removes the only defined privilege + metadata = Map.ofEntries( + entry(AuthenticationField.API_KEY_ID_KEY, apiKeyId), + entry(AuthenticationField.API_KEY_NAME_KEY, apiKeyName), + entry(AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY, new BytesArray(""" + {"base_role":{"cluster":["all"], + "remote_cluster":[{"privileges":["monitor_stats"],"clusters":["*"]}] + }}""")), + entry(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY, new BytesArray(""" + {"limited_by_role":{"cluster":["*"], + "remote_cluster":[{"privileges":["monitor_stats"],"clusters":["*"]}] + }}""")) + ); + + final Authentication with1priv = AuthenticationTestHelper.builder() + .apiKey() + .metadata(metadata) + .transportVersion(TransportVersion.current()) + .build(); + + rewrittenMetadata = with1priv.maybeRewriteForOlderVersion(olderVersion).getEffectiveSubject().getMetadata(); + assertThat(rewrittenMetadata.keySet(), equalTo(with1priv.getAuthenticatingSubject().getMetadata().keySet())); + + // the one privilege is removed after the rewrite, which removes the full "remote_cluster" object + baseRoleBytes = (BytesReference) rewrittenMetadata.get(AuthenticationField.API_KEY_ROLE_DESCRIPTORS_KEY); + baseRoleAsMap = XContentHelper.convertToMap(baseRoleBytes, false, XContentType.JSON).v2(); + assertThat(ObjectPath.eval("base_role.remote_cluster", baseRoleAsMap), nullValue()); + assertThat(ObjectPath.eval("base_role.cluster", baseRoleAsMap), notNullValue()); + limitedByRoleBytes = (BytesReference)
rewrittenMetadata.get(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY); + limitedByRoleAsMap = XContentHelper.convertToMap(limitedByRoleBytes, false, XContentType.JSON).v2(); + assertThat(ObjectPath.eval("limited_by_role.remote_cluster", limitedByRoleAsMap), nullValue()); + assertThat(ObjectPath.eval("limited_by_role.cluster", limitedByRoleAsMap), notNullValue()); + } + public void testMaybeRemoveRemoteIndicesFromRoleDescriptors() { final boolean includeClusterPrivileges = randomBoolean(); final BytesReference roleWithoutRemoteIndices = new BytesArray(Strings.format(""" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java index 94430a4ed5bba..218876c7d40e8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java @@ -542,6 +542,34 @@ public void testParseInvalidRemoteCluster() throws IOException { () -> RoleDescriptor.parserBuilder().build().parse("test", new BytesArray(q4), XContentType.JSON) ); assertThat(illegalArgumentException.getMessage(), containsString("remote cluster groups must not be null or empty")); + + // one invalid privilege + String q5 = """ + { + "remote_cluster": [ + { + "privileges": [ + "monitor_stats", "read_pipeline" + ], + "clusters": [ + "*" + ] + } + ] + }"""; + + ElasticsearchParseException parseException = expectThrows( + ElasticsearchParseException.class, + () -> RoleDescriptor.parserBuilder().build().parse("test", new BytesArray(q5), XContentType.JSON) + ); + assertThat( + parseException.getMessage(), + containsString( + "failed to parse remote_cluster for role [test]. " + + "[monitor_enrich, monitor_stats] are the only values allowed for [privileges] within [remote_cluster]. 
" + + "Found [monitor_stats, read_pipeline]" + ) + ); } public void testParsingFieldPermissionsUsesCache() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroupTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroupTests.java index cd269bd1a97b3..0b99db826d540 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroupTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionGroupTests.java @@ -16,6 +16,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Locale; +import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -90,7 +91,7 @@ public void testInvalidValues() { ); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, invalidClusterAlias); - assertEquals("remote_cluster clusters aliases must contain valid non-empty, non-null values", e.getMessage()); + assertThat(e.getMessage(), containsString("remote_cluster clusters aliases must contain valid non-empty, non-null values")); final ThrowingRunnable invalidPermission = randomFrom( () -> new RemoteClusterPermissionGroup(new String[] { null }, new String[] { "bar" }), @@ -100,7 +101,17 @@ public void testInvalidValues() { ); IllegalArgumentException e2 = expectThrows(IllegalArgumentException.class, invalidPermission); - assertEquals("remote_cluster privileges must contain valid non-empty, non-null values", e2.getMessage()); + assertThat(e2.getMessage(), containsString("remote_cluster privileges must contain valid non-empty, non-null values")); + } + + public void testToMap() { + String[] privileges = generateRandomStringArray(5, 5, false, false); + String[] clusters = generateRandomStringArray(5, 5, false, false); + RemoteClusterPermissionGroup remoteClusterPermissionGroup = new RemoteClusterPermissionGroup(privileges, clusters); + assertEquals( + Map.of("privileges", Arrays.asList(privileges), "clusters", Arrays.asList(clusters)), + remoteClusterPermissionGroup.toMap() + ); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java index 5b5a895f12ae8..2c31965009273 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java @@ -15,6 +15,8 @@ import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import org.junit.Before; import java.io.IOException; @@ -27,8 +29,11 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; +import static org.elasticsearch.TransportVersions.ROLE_MONITOR_STATS; import static org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions.ROLE_REMOTE_CLUSTER_PRIVS; +import static 
org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions.lastTransportVersionPermission; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -85,13 +90,13 @@ public void testMatcher() { for (int i = 0; i < generateRandomGroups(true).size(); i++) { String[] clusters = groupClusters.get(i); for (String cluster : clusters) { - assertTrue(remoteClusterPermission.hasPrivileges(cluster)); - assertFalse(remoteClusterPermission.hasPrivileges(randomAlphaOfLength(20))); + assertTrue(remoteClusterPermission.hasAnyPrivileges(cluster)); + assertFalse(remoteClusterPermission.hasAnyPrivileges(randomAlphaOfLength(20))); } } } - public void testPrivilegeNames() { + public void testCollapseAndRemoveUnsupportedPrivileges() { Map> original = RemoteClusterPermissions.allowedRemoteClusterPermissions; try { // create random groups with random privileges for random clusters @@ -108,7 +113,7 @@ public void testPrivilegeNames() { String[] privileges = groupPrivileges.get(i); String[] clusters = groupClusters.get(i); for (String cluster : clusters) { - String[] found = remoteClusterPermission.privilegeNames(cluster, TransportVersion.current()); + String[] found = remoteClusterPermission.collapseAndRemoveUnsupportedPrivileges(cluster, TransportVersion.current()); Arrays.sort(found); // ensure all lowercase since the privilege names are case insensitive and the method will result in lowercase for (int j = 0; j < privileges.length; j++) { @@ -126,13 +131,14 @@ public void testPrivilegeNames() { // create random groups with random privileges for random clusters List randomGroups = generateRandomGroups(true); // replace a random value with one that is allowed - groupPrivileges.get(0)[0] = "monitor_enrich"; + String singleValidPrivilege = randomFrom(RemoteClusterPermissions.allowedRemoteClusterPermissions.get(TransportVersion.current())); + groupPrivileges.get(0)[0] = singleValidPrivilege; for (int i = 0; i < randomGroups.size(); i++) { String[] privileges = groupPrivileges.get(i); String[] clusters = groupClusters.get(i); for (String cluster : clusters) { - String[] found = remoteClusterPermission.privilegeNames(cluster, TransportVersion.current()); + String[] found = remoteClusterPermission.collapseAndRemoveUnsupportedPrivileges(cluster, TransportVersion.current()); Arrays.sort(found); // ensure all lowercase since the privilege names are case insensitive and the method will result in lowercase for (int j = 0; j < privileges.length; j++) { @@ -149,7 +155,7 @@ public void testPrivilegeNames() { assertFalse(Arrays.equals(privileges, found)); if (i == 0) { // ensure that for the current version we only find the valid "monitor_enrich" - assertThat(Set.of(found), equalTo(Set.of("monitor_enrich"))); + assertThat(Set.of(found), equalTo(Set.of(singleValidPrivilege))); } else { // all other groups should be found to not have any privileges assertTrue(found.length == 0); @@ -159,21 +165,26 @@ public void testPrivilegeNames() { } } - public void testMonitorEnrichPerVersion() { - // test monitor_enrich before, after and on monitor enrich version - String[] privileges = randomBoolean() ? 
new String[] { "monitor_enrich" } : new String[] { "monitor_enrich", "foo", "bar" }; + public void testPermissionsPerVersion() { + testPermissionPerVersion("monitor_enrich", ROLE_REMOTE_CLUSTER_PRIVS); + testPermissionPerVersion("monitor_stats", ROLE_MONITOR_STATS); + } + + private void testPermissionPerVersion(String permission, TransportVersion version) { + // test permission before, after and on the version + String[] privileges = randomBoolean() ? new String[] { permission } : new String[] { permission, "foo", "bar" }; String[] before = new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(privileges, new String[] { "*" })) - .privilegeNames("*", TransportVersionUtils.getPreviousVersion(ROLE_REMOTE_CLUSTER_PRIVS)); - // empty set since monitor_enrich is not allowed in the before version + .collapseAndRemoveUnsupportedPrivileges("*", TransportVersionUtils.getPreviousVersion(version)); + // empty set since permissions is not allowed in the before version assertThat(Set.of(before), equalTo(Collections.emptySet())); String[] on = new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(privileges, new String[] { "*" })) - .privilegeNames("*", ROLE_REMOTE_CLUSTER_PRIVS); - // only monitor_enrich since the other values are not allowed - assertThat(Set.of(on), equalTo(Set.of("monitor_enrich"))); + .collapseAndRemoveUnsupportedPrivileges("*", version); + // the permission is found on that provided version + assertThat(Set.of(on), equalTo(Set.of(permission))); String[] after = new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(privileges, new String[] { "*" })) - .privilegeNames("*", TransportVersion.current()); - // only monitor_enrich since the other values are not allowed - assertThat(Set.of(after), equalTo(Set.of("monitor_enrich"))); + .collapseAndRemoveUnsupportedPrivileges("*", TransportVersion.current()); + // current version (after the version) has the permission + assertThat(Set.of(after), equalTo(Set.of(permission))); } public void testValidate() { @@ -181,12 +192,70 @@ public void testValidate() { // random values not allowed IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> remoteClusterPermission.validate()); assertTrue(error.getMessage().contains("Invalid remote_cluster permissions found. 
Please remove the following:")); - assertTrue(error.getMessage().contains("Only [monitor_enrich] are allowed")); + assertTrue(error.getMessage().contains("Only [monitor_enrich, monitor_stats] are allowed")); new RemoteClusterPermissions().addGroup(new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" })) .validate(); // no error } + public void testToMap() { + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + List groups = generateRandomGroups(randomBoolean()); + for (int i = 0; i < groups.size(); i++) { + remoteClusterPermissions.addGroup(groups.get(i)); + } + List>> asAsMap = remoteClusterPermissions.toMap(); + RemoteClusterPermissions remoteClusterPermissionsAsMap = new RemoteClusterPermissions(asAsMap); + assertEquals(remoteClusterPermissions, remoteClusterPermissionsAsMap); + } + + public void testRemoveUnsupportedPrivileges() { + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + RemoteClusterPermissionGroup group = new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" }); + remoteClusterPermissions.addGroup(group); + // this privilege is allowed by versions, so nothing should be removed + assertEquals(remoteClusterPermissions, remoteClusterPermissions.removeUnsupportedPrivileges(ROLE_REMOTE_CLUSTER_PRIVS)); + assertEquals(remoteClusterPermissions, remoteClusterPermissions.removeUnsupportedPrivileges(ROLE_MONITOR_STATS)); + + remoteClusterPermissions = new RemoteClusterPermissions(); + if (randomBoolean()) { + group = new RemoteClusterPermissionGroup(new String[] { "monitor_stats" }, new String[] { "*" }); + } else { + // if somehow duplicates end up here, they should not influence removal + group = new RemoteClusterPermissionGroup(new String[] { "monitor_stats", "monitor_stats" }, new String[] { "*" }); + } + remoteClusterPermissions.addGroup(group); + // this single newer privilege is not allowed in the older version, so it should result in an object with no groups + assertNotEquals(remoteClusterPermissions, remoteClusterPermissions.removeUnsupportedPrivileges(ROLE_REMOTE_CLUSTER_PRIVS)); + assertFalse(remoteClusterPermissions.removeUnsupportedPrivileges(ROLE_REMOTE_CLUSTER_PRIVS).hasAnyPrivileges()); + assertEquals(remoteClusterPermissions, remoteClusterPermissions.removeUnsupportedPrivileges(ROLE_MONITOR_STATS)); + + int groupCount = randomIntBetween(1, 5); + remoteClusterPermissions = new RemoteClusterPermissions(); + group = new RemoteClusterPermissionGroup(new String[] { "monitor_enrich", "monitor_stats" }, new String[] { "*" }); + for (int i = 0; i < groupCount; i++) { + remoteClusterPermissions.addGroup(group); + } + // one of the newer privilege is not allowed in the older version, so it should result in a group with only the allowed privilege + RemoteClusterPermissions expected = new RemoteClusterPermissions(); + for (int i = 0; i < groupCount; i++) { + expected.addGroup(new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" })); + } + assertEquals(expected, remoteClusterPermissions.removeUnsupportedPrivileges(ROLE_REMOTE_CLUSTER_PRIVS)); + // both privileges allowed in the newer version, so it should not change the permission + assertEquals(remoteClusterPermissions, remoteClusterPermissions.removeUnsupportedPrivileges(ROLE_MONITOR_STATS)); + } + + public void testShortCircuitRemoveUnsupportedPrivileges() { + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + 
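// removeUnsupportedPrivileges is expected to short-circuit and return this exact instance (hence assertSame,
+ // not just equals) when the target version already supports every known remote_cluster privilege
+ 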
assertSame(remoteClusterPermissions, remoteClusterPermissions.removeUnsupportedPrivileges(TransportVersion.current())); + assertSame(remoteClusterPermissions, remoteClusterPermissions.removeUnsupportedPrivileges(lastTransportVersionPermission)); + assertNotSame( + remoteClusterPermissions, + remoteClusterPermissions.removeUnsupportedPrivileges(TransportVersionUtils.getPreviousVersion(lastTransportVersionPermission)) + ); + } + private List generateRandomGroups(boolean fuzzyCluster) { clean(); List groups = new ArrayList<>(); @@ -216,22 +285,48 @@ protected Writeable.Reader instanceReader() { @Override protected RemoteClusterPermissions createTestInstance() { + Set all = RemoteClusterPermissions.allowedRemoteClusterPermissions.values() + .stream() + .flatMap(Set::stream) + .collect(Collectors.toSet()); + List randomPermission = randomList(1, all.size(), () -> randomFrom(all)); return new RemoteClusterPermissions().addGroup( - new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" }) + new RemoteClusterPermissionGroup(randomPermission.toArray(new String[0]), new String[] { "*" }) ); } @Override protected RemoteClusterPermissions mutateInstance(RemoteClusterPermissions instance) throws IOException { return new RemoteClusterPermissions().addGroup( - new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "*" }) + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich", "monitor_stats" }, new String[] { "*" }) ).addGroup(new RemoteClusterPermissionGroup(new String[] { "foobar" }, new String[] { "*" })); } @Override protected RemoteClusterPermissions doParseInstance(XContentParser parser) throws IOException { - // fromXContent/parsing isn't supported since we still do old school manual parsing of the role descriptor - return createTestInstance(); + // fromXContent/object parsing isn't supported since we still do old school manual parsing of the role descriptor + // so this test is silly because it only tests we know how to manually parse the test instance in this test + // this is needed since we want the other parts from the AbstractXContentSerializingTestCase suite + RemoteClusterPermissions remoteClusterPermissions = new RemoteClusterPermissions(); + String[] privileges = null; + String[] clusters = null; + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.START_OBJECT) { + continue; + } + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (RoleDescriptor.Fields.PRIVILEGES.match(currentFieldName, parser.getDeprecationHandler())) { + privileges = XContentUtils.readStringArray(parser, false); + + } else if (RoleDescriptor.Fields.CLUSTERS.match(currentFieldName, parser.getDeprecationHandler())) { + clusters = XContentUtils.readStringArray(parser, false); + } + } + remoteClusterPermissions.addGroup(new RemoteClusterPermissionGroup(privileges, clusters)); + return remoteClusterPermissions; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index a71ac6a9b51fd..fb4d822b7655c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -2833,7 +2833,7 @@ public void testSuperuserRole() { is(false) ); assertThat( - superuserRole.remoteCluster().privilegeNames("*", TransportVersion.current()), + superuserRole.remoteCluster().collapseAndRemoveUnsupportedPrivileges("*", TransportVersion.current()), equalTo(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0])) ); } diff --git a/x-pack/plugin/security/qa/multi-cluster/build.gradle b/x-pack/plugin/security/qa/multi-cluster/build.gradle index c7b8f81bb7876..b8eccb14819a4 100644 --- a/x-pack/plugin/security/qa/multi-cluster/build.gradle +++ b/x-pack/plugin/security/qa/multi-cluster/build.gradle @@ -31,13 +31,15 @@ dependencies { tasks.named("javaRestTest") { enabled = true // This is tested explicitly in bwc test tasks. - exclude '**/RemoteClusterSecurityBwcRestIT.class' + exclude '**/RemoteClusterSecurityBWCToRCS1ClusterRestIT.class' + exclude '**/RemoteClusterSecurityBWCToRCS2ClusterRestIT.class' } -BuildParams.bwcVersions.withWireCompatible(v -> v.before(BuildParams.isSnapshotBuild() ? '8.8.0' : '8.9.1')) { bwcVersion, baseName -> +BuildParams.bwcVersions.withWireCompatible() { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) - include '**/RemoteClusterSecurityBwcRestIT.class' + include '**/RemoteClusterSecurityBWCToRCS1ClusterRestIT.class' + include '**/RemoteClusterSecurityBWCToRCS2ClusterRestIT.class' } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityBWCRestIT.java similarity index 65% rename from x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java rename to x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityBWCRestIT.java index 17acd258ed34b..20cdbb9f8b0df 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityBWCRestIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.remotecluster; +import org.apache.http.util.EntityUtils; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -15,14 +16,9 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchResponseUtils; -import org.elasticsearch.test.cluster.ElasticsearchCluster; -import org.elasticsearch.test.cluster.local.distribution.DistributionType; -import org.elasticsearch.test.cluster.util.Version; -import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ObjectPath; -import org.junit.ClassRule; -import org.junit.rules.RuleChain; -import org.junit.rules.TestRule; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; import java.util.Arrays; @@ -32,48 
+28,21 @@ import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; /** - * BWC test which ensures that users and API keys with defined {@code remote_indices} privileges can be used to query legacy remote clusters + * A set of BWC tests that can be executed with either RCS 1 or RCS 2 against an older fulfilling cluster. */ -public class RemoteClusterSecurityBwcRestIT extends AbstractRemoteClusterSecurityTestCase { +public abstract class AbstractRemoteClusterSecurityBWCRestIT extends AbstractRemoteClusterSecurityTestCase { - private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); + protected abstract boolean isRCS2(); - static { - fulfillingCluster = ElasticsearchCluster.local() - .version(OLD_CLUSTER_VERSION) - .distribution(DistributionType.DEFAULT) - .name("fulfilling-cluster") - .apply(commonClusterConfig) - .setting("xpack.ml.enabled", "false") - .build(); - - queryCluster = ElasticsearchCluster.local() - .version(Version.CURRENT) - .distribution(DistributionType.INTEG_TEST) - .name("query-cluster") - .apply(commonClusterConfig) - .setting("xpack.security.remote_cluster_client.ssl.enabled", "true") - .setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt") - .rolesFile(Resource.fromClasspath("roles.yml")) - .build(); - } - - @ClassRule - // Use a RuleChain to ensure that fulfilling cluster is started before query cluster - public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); - - public void testBwcWithLegacyCrossClusterSearch() throws Exception { - final boolean useProxyMode = randomBoolean(); - // Update remote cluster settings on QC. - setupQueryClusterRemoteClusters(useProxyMode); - // Ensure remote cluster is connected - ensureRemoteFulfillingClusterIsConnected(useProxyMode); + public void testBwcCCSViaRCS1orRCS2() throws Exception { // Fulfilling cluster { @@ -122,19 +91,22 @@ public void testBwcWithLegacyCrossClusterSearch() throws Exception { ] }"""); assertOK(adminClient().performRequest(putRoleRequest)); - // We need to define the same role on QC and FC in order for CCS to work. - final var putRoleRequestFulfilling = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); - putRoleRequestFulfilling.setJsonEntity(""" - { - "cluster": ["manage_own_api_key"], - "indices": [ + if (isRCS2() == false) { + // We need to define the same role on QC and FC in order for CCS to work. 
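+                // with RCS 2.0 the fulfilling cluster authorizes the call via the cross-cluster API key instead, so no FC-side role is required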
+ final var putRoleRequestFulfilling = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleRequestFulfilling.setJsonEntity(""" { - "names": ["remote_index1"], - "privileges": ["read", "read_cross_cluster"] - } - ] - }"""); - assertOK(performRequestAgainstFulfillingCluster(putRoleRequestFulfilling)); + "cluster": ["manage_own_api_key"], + "indices": [ + { + "names": ["remote_index1"], + "privileges": ["read", "read_cross_cluster"] + } + ] + }"""); + assertOK(performRequestAgainstFulfillingCluster(putRoleRequestFulfilling)); + } + final var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); putUserRequest.setJsonEntity(""" { @@ -166,7 +138,7 @@ public void testBwcWithLegacyCrossClusterSearch() throws Exception { ], "remote_cluster": [ { - "privileges": ["monitor_enrich"], + "privileges": ["monitor_enrich", "monitor_stats"], "clusters": ["*"] } ] @@ -187,38 +159,35 @@ public void testBwcWithLegacyCrossClusterSearch() throws Exception { // Check that we can search the fulfilling cluster from the querying cluster final boolean alsoSearchLocally = randomBoolean(); + final String remoteClusterName = randomFrom("my_remote_cluster", "*", "my_remote_*"); + final String remoteIndexName = randomFrom("remote_index1", "*"); final var searchRequest = new Request( "GET", String.format( Locale.ROOT, "/%s%s:%s/_search?ccs_minimize_roundtrips=%s", alsoSearchLocally ? "local_index," : "", - randomFrom("my_remote_cluster", "*", "my_remote_*"), - randomFrom("remote_index1", "*"), + remoteClusterName, + remoteIndexName, randomBoolean() ) ); - final String sendRequestWith = randomFrom("user", "apikey"); - final Response response = sendRequestWith.equals("user") - ? performRequestWithRemoteAccessUser(searchRequest) - : performRequestWithApiKey(searchRequest, apiKeyEncoded); + String esqlCommand = String.format(Locale.ROOT, "FROM %s,%s:%s | LIMIT 10", "local_index", remoteClusterName, remoteIndexName); + // send request with user + Response response = performRequestWithRemoteAccessUser(searchRequest); assertOK(response); - final SearchResponse searchResponse; try (var parser = responseAsParser(response)) { - searchResponse = SearchResponseUtils.parseSearchResponse(parser); + assertSearchResponse(SearchResponseUtils.parseSearchResponse(parser), alsoSearchLocally); } - try { - final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) - .map(SearchHit::getIndex) - .collect(Collectors.toList()); - if (alsoSearchLocally) { - assertThat(actualIndices, containsInAnyOrder("remote_index1", "local_index")); - } else { - assertThat(actualIndices, containsInAnyOrder("remote_index1")); - } - } finally { - searchResponse.decRef(); + assertEsqlResponse(performRequestWithRemoteAccessUser(esqlRequest(esqlCommand))); + + // send request with apikey + response = performRequestWithApiKey(searchRequest, apiKeyEncoded); + assertOK(response); + try (var parser = responseAsParser(response)) { + assertSearchResponse(SearchResponseUtils.parseSearchResponse(parser), alsoSearchLocally); } + assertEsqlResponse(performRequestWithApiKey(esqlRequest(esqlCommand), apiKeyEncoded)); } } @@ -231,6 +200,14 @@ private void ensureRemoteFulfillingClusterIsConnected(boolean useProxyMode) thro final Map remoteInfoMap = responseAsMap(remoteInfoResponse); assertThat(remoteInfoMap, hasKey("my_remote_cluster")); assertThat(org.elasticsearch.xcontent.ObjectPath.eval("my_remote_cluster.connected", remoteInfoMap), is(true)); + if (isRCS2()) { + assertThat( + 
org.elasticsearch.xcontent.ObjectPath.eval("my_remote_cluster.cluster_credentials", remoteInfoMap), + is("::es_redacted::") // RCS 2.0 + ); + } else { + assertThat(org.elasticsearch.xcontent.ObjectPath.eval("my_remote_cluster.cluster_credentials", remoteInfoMap), nullValue()); + } if (false == useProxyMode) { assertThat( org.elasticsearch.xcontent.ObjectPath.eval("my_remote_cluster.num_nodes_connected", remoteInfoMap), @@ -240,7 +217,17 @@ private void ensureRemoteFulfillingClusterIsConnected(boolean useProxyMode) thro }); } - private void setupQueryClusterRemoteClusters(boolean useProxyMode) throws IOException { + private Response performRequestWithRemoteAccessUser(final Request request) throws IOException { + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue(REMOTE_SEARCH_USER, PASS))); + return client().performRequest(request); + } + + private Response performRequestWithApiKey(final Request request, final String encoded) throws IOException { + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encoded)); + return client().performRequest(request); + } + + private void setupQueryClusterRCS1(boolean useProxyMode) throws IOException { final Settings.Builder builder = Settings.builder(); if (useProxyMode) { builder.put("cluster.remote.my_remote_cluster.mode", "proxy") @@ -252,14 +239,37 @@ private void setupQueryClusterRemoteClusters(boolean useProxyMode) throws IOExce updateClusterSettings(builder.build()); } - private Response performRequestWithRemoteAccessUser(final Request request) throws IOException { - request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue(REMOTE_SEARCH_USER, PASS))); - return client().performRequest(request); + private Request esqlRequest(String command) throws IOException { + XContentBuilder body = JsonXContent.contentBuilder(); + body.startObject(); + body.field("query", command); + body.field("include_ccs_metadata", true); + body.endObject(); + Request request = new Request("POST", "_query"); + request.setJsonEntity(org.elasticsearch.common.Strings.toString(body)); + return request; } - private Response performRequestWithApiKey(final Request request, final String encoded) throws IOException { - request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encoded)); - return client().performRequest(request); + private void assertSearchResponse(SearchResponse searchResponse, boolean alsoSearchLocally) { + try { + final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) + .map(SearchHit::getIndex) + .collect(Collectors.toList()); + if (alsoSearchLocally) { + assertThat(actualIndices, containsInAnyOrder("remote_index1", "local_index")); + } else { + assertThat(actualIndices, containsInAnyOrder("remote_index1")); + } + } finally { + searchResponse.decRef(); + } } + private void assertEsqlResponse(Response response) throws IOException { + assertOK(response); + String responseAsString = EntityUtils.toString(response.getEntity()); + assertThat(responseAsString, containsString("\"my_remote_cluster\":{\"status\":\"successful\"")); + assertThat(responseAsString, containsString("local_bar")); + assertThat(responseAsString, containsString("bar")); + } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS1ClusterRestIT.java 
b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS1ClusterRestIT.java
new file mode 100644
index 0000000000000..73e0f096039f9
--- /dev/null
+++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS1ClusterRestIT.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.remotecluster;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+import org.elasticsearch.test.cluster.util.Version;
+import org.elasticsearch.test.cluster.util.resource.Resource;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+/**
+ * BWC test which ensures that users and API keys with defined {@code remote_indices}/{@code remote_cluster} privileges can be used
+ * to query legacy remote clusters when using RCS 1.0. We send the request to an older fulfilling cluster using RCS 1.0 with a user/role
+ * and API key where the {@code remote_indices}/{@code remote_cluster} are defined in the newer query cluster.
+ * All RCS 2.0 config should be effectively ignored when using RCS 1.0 for CCS. We send to an older fulfilling cluster to help ensure that
+ * newly introduced RCS 2.0 artifacts are forward compatible from the perspective of the old cluster. For example, a new privilege
+ * sent to an old cluster should be ignored.
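+ * The {@code monitor_stats} privilege introduced in this change is a concrete example: it must be stripped before
+ * being sent to clusters that predate it.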
+ */
+public class RemoteClusterSecurityBWCToRCS1ClusterRestIT extends AbstractRemoteClusterSecurityBWCRestIT {
+
+    private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version"));
+
+    static {
+        fulfillingCluster = ElasticsearchCluster.local()
+            .version(OLD_CLUSTER_VERSION)
+            .distribution(DistributionType.DEFAULT)
+            .name("fulfilling-cluster")
+            .apply(commonClusterConfig)
+            .setting("xpack.ml.enabled", "false")
+            // .setting("logger.org.elasticsearch.xpack.core", "trace") //useful for human debugging
+            // .setting("logger.org.elasticsearch.xpack.security", "trace") //useful for human debugging
+            .build();
+
+        queryCluster = ElasticsearchCluster.local()
+            .version(Version.CURRENT)
+            .distribution(DistributionType.DEFAULT)
+            .setting("xpack.ml.enabled", "false")
+            .name("query-cluster")
+            .apply(commonClusterConfig)
+            .setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
+            .setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
+            .rolesFile(Resource.fromClasspath("roles.yml"))
+            .build();
+    }
+
+    @ClassRule
+    // Use a RuleChain to ensure that fulfilling cluster is started before query cluster
+    public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster);
+
+    @Override
+    protected boolean isRCS2() {
+        return false;
+    }
+
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, true, randomBoolean(), false);
+        super.setUp();
+    }
+}
diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS2ClusterRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS2ClusterRestIT.java
new file mode 100644
index 0000000000000..5e173b72c66de
--- /dev/null
+++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBWCToRCS2ClusterRestIT.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.remotecluster;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
+import org.elasticsearch.test.cluster.util.Version;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * BWC test which ensures that users and API keys with defined {@code remote_indices}/{@code remote_cluster} privileges can be used
+ * to query older remote clusters when using RCS 2.0. We send the request to an older fulfilling cluster using RCS 2.0 with a user/role
+ * and API key where the {@code remote_indices}/{@code remote_cluster} are defined in the newer query cluster.
+ * All new RCS 2.0 config should be effectively ignored when sending to an older RCS 2.0 cluster. For example, a new privilege
+ * sent to an old cluster should be ignored.
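+ * Unlike the RCS 1.0 variant above, the fulfilling cluster here trusts a cross-cluster API key, so roles are defined
+ * only on the querying cluster.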
+ */ +public class RemoteClusterSecurityBWCToRCS2ClusterRestIT extends AbstractRemoteClusterSecurityBWCRestIT { + + private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); + private static final AtomicReference> API_KEY_MAP_REF = new AtomicReference<>(); + + static { + + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .version(OLD_CLUSTER_VERSION) + .distribution(DistributionType.DEFAULT) + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("remote_cluster_server.enabled", "true") + .setting("remote_cluster.port", "0") + .setting("xpack.security.remote_cluster_server.ssl.enabled", "true") + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + // .setting("logger.org.elasticsearch.xpack.core", "trace") //useful for human debugging + // .setting("logger.org.elasticsearch.xpack.security", "trace") //useful for human debugging + .build(); + + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .distribution(DistributionType.DEFAULT) + .setting("xpack.ml.enabled", "false") + .apply(commonClusterConfig) + .setting("xpack.security.remote_cluster_client.ssl.enabled", "true") + .setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt") + .keystore("cluster.remote.my_remote_cluster.credentials", () -> { + if (API_KEY_MAP_REF.get() == null) { + final Map apiKeyMap = createCrossClusterAccessApiKey(""" + { + "search": [ + { + "names": ["*"] + } + ] + }"""); + API_KEY_MAP_REF.set(apiKeyMap); + } + return (String) API_KEY_MAP_REF.get().get("encoded"); + }) + .build(); + } + + @ClassRule + // Use a RuleChain to ensure that fulfilling cluster is started before query cluster + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + @Override + protected boolean isRCS2() { + return true; + } + + @Before + @Override + public void setUp() throws Exception { + configureRemoteCluster(REMOTE_CLUSTER_ALIAS, fulfillingCluster, false, randomBoolean(), false); + super.setUp(); + } +} diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestStatsIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestStatsIT.java new file mode 100644 index 0000000000000..e98fcf6f72881 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestStatsIT.java @@ -0,0 +1,266 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.remotecluster; + +import org.apache.http.util.EntityUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Strings; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.junit.RunnableTestRuleAdapter; +import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.xcontent.XContentType; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteClusterSecurityRestStatsIT extends AbstractRemoteClusterSecurityTestCase { + + private static final AtomicReference> API_KEY_MAP_REF = new AtomicReference<>(); + private static final AtomicReference> REST_API_KEY_MAP_REF = new AtomicReference<>(); + private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean(); + private static final AtomicBoolean NODE1_RCS_SERVER_ENABLED = new AtomicBoolean(); + private static final AtomicBoolean NODE2_RCS_SERVER_ENABLED = new AtomicBoolean(); + private static final int FULFILL_NODE_COUNT = 3; + private static final Logger logger = LogManager.getLogger(RemoteClusterSecurityRestStatsIT.class); + + static { + fulfillingCluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .name("fulfilling-cluster") + .nodes(FULFILL_NODE_COUNT) + .apply(commonClusterConfig) + .setting("remote_cluster.port", "0") + .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true")) + .node(1, spec -> spec.setting("remote_cluster_server.enabled", () -> String.valueOf(NODE1_RCS_SERVER_ENABLED.get()))) + .node(2, spec -> spec.setting("remote_cluster_server.enabled", () -> String.valueOf(NODE2_RCS_SERVER_ENABLED.get()))) + .build(); + + queryCluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .name("query-cluster") + .apply(commonClusterConfig) + .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt") + 
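// the keystore entries below supply the remote connection credentials: a valid cross-cluster API key, plus a bogus
+            // key and a REST API key that exercise the expected failure paths
+            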
.setting("xpack.security.authc.token.enabled", "true") + .keystore("cluster.remote.my_remote_cluster.credentials", () -> { + if (API_KEY_MAP_REF.get() == null) { + final Map apiKeyMap = createCrossClusterAccessApiKey(""" + { + "search": [ + { + "names": ["*"] + } + ] + }"""); + API_KEY_MAP_REF.set(apiKeyMap); + } + return (String) API_KEY_MAP_REF.get().get("encoded"); + }) + // Define a bogus API key for another remote cluster + .keystore("cluster.remote.invalid_remote.credentials", randomEncodedApiKey()) + // Define remote with a REST API key to observe expected failure + .keystore("cluster.remote.wrong_api_key_type.credentials", () -> { + if (REST_API_KEY_MAP_REF.get() == null) { + initFulfillingClusterClient(); + final var createApiKeyRequest = new Request("POST", "/_security/api_key"); + createApiKeyRequest.setJsonEntity(""" + { + "name": "rest_api_key" + }"""); + try { + final Response createApiKeyResponse = performRequestWithAdminUser(fulfillingClusterClient, createApiKeyRequest); + assertOK(createApiKeyResponse); + REST_API_KEY_MAP_REF.set(responseAsMap(createApiKeyResponse)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + return (String) REST_API_KEY_MAP_REF.get().get("encoded"); + }) + .rolesFile(Resource.fromClasspath("roles.yml")) + .user(REMOTE_METRIC_USER, PASS.toString(), "read_remote_shared_metrics", false) + .build(); + } + + @ClassRule + // Use a RuleChain to ensure that fulfilling cluster is started before query cluster + // `SSL_ENABLED_REF` is used to control the SSL-enabled setting on the test clusters + // We set it here, since randomization methods are not available in the static initialize context above + public static TestRule clusterRule = RuleChain.outerRule(new RunnableTestRuleAdapter(() -> { + SSL_ENABLED_REF.set(usually()); + NODE1_RCS_SERVER_ENABLED.set(randomBoolean()); + NODE2_RCS_SERVER_ENABLED.set(randomBoolean()); + })).around(fulfillingCluster).around(queryCluster); + + public void testCrossClusterStats() throws Exception { + configureRemoteCluster(); + setupRoleAndUserQueryCluster(); + addDocToIndexFulfillingCluster("index1"); + + // search #1 + searchFulfillingClusterFromQueryCluster("index1"); + Map statsResponseAsMap = getFulfillingClusterStatsFromQueryCluster(); + assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs.clusters.my_remote_cluster.nodes_count"), equalTo(FULFILL_NODE_COUNT)); + assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs._search.clusters.my_remote_cluster.total"), equalTo(1)); + int initialIndexCount = ObjectPath.evaluate(statsResponseAsMap, "ccs.clusters.my_remote_cluster.indices_count"); + + // search #2 + searchFulfillingClusterFromQueryCluster("index1"); + statsResponseAsMap = getFulfillingClusterStatsFromQueryCluster(); + assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs._search.total"), equalTo(2)); + assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs._search.clusters.my_remote_cluster.total"), equalTo(2)); + + // search #3 + expectThrows(Exception.class, () -> searchFulfillingClusterFromQueryCluster("junk")); + statsResponseAsMap = getFulfillingClusterStatsFromQueryCluster(); + assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs._search.total"), equalTo(3)); + assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs._search.clusters.my_remote_cluster.total"), equalTo(2)); + + // search #4 + addDocToIndexFulfillingCluster("index2"); + searchFulfillingClusterFromQueryCluster("index2"); + statsResponseAsMap = getFulfillingClusterStatsFromQueryCluster(); + 
assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs._search.total"), equalTo(4)); + assertThat(ObjectPath.evaluate(statsResponseAsMap, "ccs._search.clusters.my_remote_cluster.total"), equalTo(3)); + int updatedIndexCount = ObjectPath.evaluate(statsResponseAsMap, "ccs.clusters.my_remote_cluster.indices_count"); + assertThat(updatedIndexCount, equalTo(initialIndexCount + 1)); + } + + private Map getFulfillingClusterStatsFromQueryCluster() throws IOException { + return getFulfillingClusterStatsFromQueryCluster(false); + } + + private Map getFulfillingClusterStatsFromQueryCluster(boolean humanDebug) throws IOException { + Request stats = new Request("GET", "_cluster/stats?include_remotes=true&filter_path=ccs"); + Response statsResponse = performRequestWithRemoteSearchUser(stats); + if (humanDebug) { + debugResponse(statsResponse); + } + return entityAsMap(statsResponse.getEntity()); + } + + private void searchFulfillingClusterFromQueryCluster(String index, boolean humanDebug) throws IOException { + final var searchRequest = new Request( + "GET", + String.format( + Locale.ROOT, + "/%s:%s/_search?ccs_minimize_roundtrips=%s", + randomFrom("my_remote_cluster", "*", "my_remote_*"), + index, + randomBoolean() + ) + ); + Response response = performRequestWithRemoteSearchUser(searchRequest); + if (humanDebug) { + debugResponse(response); + } + assertOK(response); + final SearchResponse searchResponse = SearchResponseUtils.parseSearchResponse(responseAsParser(response)); + try { + final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) + .map(SearchHit::getIndex) + .collect(Collectors.toList()); + assertThat(actualIndices, containsInAnyOrder(index)); + + } finally { + searchResponse.decRef(); + } + } + + private void searchFulfillingClusterFromQueryCluster(String index) throws IOException { + searchFulfillingClusterFromQueryCluster(index, false); + } + + private void addDocToIndexFulfillingCluster(String index) throws IOException { + // Index some documents, so we can attempt to search them from the querying cluster + final Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(Strings.format(""" + { "index": { "_index": "%s" } } + { "foo": "bar" } + """, index)); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + + private void setupRoleAndUserQueryCluster() throws IOException { + final var putRoleRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleRequest.setJsonEntity(""" + { + "description": "Role with privileges for remote indices and stats.", + "cluster": ["monitor_stats"], + "remote_indices": [ + { + "names": ["*"], + "privileges": ["read", "read_cross_cluster"], + "clusters": ["*"] + } + ], + "remote_cluster": [ + { + "privileges": ["monitor_stats"], + "clusters": ["*"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleRequest)); + final var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + } + + private Response performRequestWithRemoteSearchUser(final Request request) throws IOException { + request.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_SEARCH_USER, PASS)) + ); + return client().performRequest(request); + } + + // helper method for humans see the responses for debug purposes, when used will always 
fail the test + private void debugResponse(Response response) throws IOException { + String jsonString = XContentHelper.convertToJson( + new BytesArray(EntityUtils.toString(response.getEntity())), + true, + true, + XContentType.JSON + ); + logger.error(jsonString); + assertFalse(true); // boom + } +} diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 667140b849951..8ce7fc77fe4f3 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -828,7 +828,7 @@ public void testRemoteClusterSupportForApiKeys() throws IOException { assertOK(response); assertAPIKeyWithRemoteClusterPermissions(apiKeyId, includeRemoteCluster, false, null, new String[] { "foo", "bar" }); - // create API key as the remote user which does remote_cluster limited_by permissions + // create API key as the remote user which has all remote_cluster permissions via limited_by response = sendRequestAsRemoteUser(createApiKeyRequest); apiKeyId = ObjectPath.createFromResponse(response).evaluate("id"); assertThat(apiKeyId, notNullValue()); @@ -922,7 +922,7 @@ private void assertAPIKeyWithRemoteClusterPermissions( assertNotNull(limitedByRole); List>> remoteCluster = (List>>) limitedByRole.get("remote_cluster"); - assertThat(remoteCluster.get(0).get("privileges"), containsInAnyOrder("monitor_enrich")); + assertThat(remoteCluster.get(0).get("privileges"), containsInAnyOrder("monitor_stats", "monitor_enrich")); assertThat(remoteCluster.get(0).get("clusters"), containsInAnyOrder("remote")); } else { // no limited by permissions diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index d79a3e31c1bc9..2e1a643bf4f4f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -572,7 +572,7 @@ public static void buildRoleFromDescriptors( ); }); - if (remoteClusterPermissions.hasPrivileges()) { + if (remoteClusterPermissions.hasAnyPrivileges()) { builder.addRemoteClusterPermissions(remoteClusterPermissions); } else { builder.addRemoteClusterPermissions(RemoteClusterPermissions.NONE); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java index ac8d84d95fd1d..a64cef366926f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/RoleDescriptorStore.java @@ -150,7 +150,7 @@ public void resolveCrossClusterAccessRoleReference( + "but other privileges found for subject [" + crossClusterAccessRoleReference.getUserPrincipal() + "]"; - logger.debug("{}. Invalid role descriptor: [{}]", message, roleDescriptor); + logger.warn("{}. 
Invalid role descriptor: [{}]", message, roleDescriptor); listener.onFailure(new IllegalArgumentException(message)); return; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java index d71c2b0d19074..a41c54ada781a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/RBACEngineTests.java @@ -92,6 +92,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeTests; +import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges.ManageApplicationPrivileges; import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; @@ -1312,10 +1313,7 @@ public void testBuildUserPrivilegeResponse() { ) .addRemoteClusterPermissions( new RemoteClusterPermissions().addGroup( - new RemoteClusterPermissionGroup( - RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]), - new String[] { "remote-1" } - ) + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "remote-1" }) ) .addGroup( new RemoteClusterPermissionGroup( @@ -1383,26 +1381,33 @@ public void testBuildUserPrivilegeResponse() { RemoteClusterPermissions remoteClusterPermissions = response.getRemoteClusterPermissions(); String[] allRemoteClusterPermissions = RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0]); - assert allRemoteClusterPermissions.length == 1 - : "if more remote cluster permissions are added this test needs to be updated to ensure the correct remotes receive the " - + "correct permissions. 
"; - // 2 groups with 3 aliases + assertThat(response.getRemoteClusterPermissions().groups(), iterableWithSize(2)); - assertEquals( - 3, - response.getRemoteClusterPermissions() - .groups() - .stream() - .map(RemoteClusterPermissionGroup::remoteClusterAliases) - .flatMap(Arrays::stream) - .distinct() - .count() + // remote-1 has monitor_enrich permission + // remote-2 and remote-3 have all permissions + assertThat( + response.getRemoteClusterPermissions().groups(), + containsInAnyOrder( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "remote-1" }), + new RemoteClusterPermissionGroup(allRemoteClusterPermissions, new String[] { "remote-2", "remote-3" }) + ) + ); + + // ensure that all permissions are valid for the current transport version + assertThat( + Arrays.asList(remoteClusterPermissions.collapseAndRemoveUnsupportedPrivileges("remote-1", TransportVersion.current())), + hasItem("monitor_enrich") ); for (String permission : RemoteClusterPermissions.getSupportedRemoteClusterPermissions()) { - assertThat(Arrays.asList(remoteClusterPermissions.privilegeNames("remote-1", TransportVersion.current())), hasItem(permission)); - assertThat(Arrays.asList(remoteClusterPermissions.privilegeNames("remote-2", TransportVersion.current())), hasItem(permission)); - assertThat(Arrays.asList(remoteClusterPermissions.privilegeNames("remote-3", TransportVersion.current())), hasItem(permission)); + assertThat( + Arrays.asList(remoteClusterPermissions.collapseAndRemoveUnsupportedPrivileges("remote-2", TransportVersion.current())), + hasItem(permission) + ); + assertThat( + Arrays.asList(remoteClusterPermissions.collapseAndRemoveUnsupportedPrivileges("remote-3", TransportVersion.current())), + hasItem(permission) + ); } } @@ -1782,7 +1787,10 @@ public void testGetRoleDescriptorsForRemoteClusterForReservedRoles() { new RoleDescriptorsIntersection( new RoleDescriptor( Role.REMOTE_USER_ROLE_NAME, - null, + RemoteClusterPermissions.getSupportedRemoteClusterPermissions() + .stream() + .filter(s -> s.equals(ClusterPrivilegeResolver.MONITOR_STATS.name())) + .toArray(String[]::new), new IndicesPrivileges[] { IndicesPrivileges.builder().indices(".monitoring-*").privileges("read", "read_cross_cluster").build(), IndicesPrivileges.builder().indices("apm-*").privileges("read", "read_cross_cluster").build(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index da903ff7f7177..cef3572ee3ac4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -1158,7 +1158,7 @@ public ClusterPermission.Builder buildPermission(ClusterPermission.Builder build assertHasRemoteIndexGroupsForClusters(forRemote, Set.of("*"), indexGroup("remote-idx-2-*")); assertValidRemoteClusterPermissions(role.remoteCluster(), new String[] { "remote-*" }); assertThat( - role.remoteCluster().privilegeNames("remote-foobar", TransportVersion.current()), + role.remoteCluster().collapseAndRemoveUnsupportedPrivileges("remote-foobar", TransportVersion.current()), equalTo(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0])) ); } @@ -3322,12 +3322,12 @@ private void assertValidRemoteClusterPermissions(RemoteClusterPermissions 
permis } private void assertValidRemoteClusterPermissionsParent(RemoteClusterPermissions permissions, String[] aliases) { - assertTrue(permissions.hasPrivileges()); + assertTrue(permissions.hasAnyPrivileges()); for (String alias : aliases) { - assertTrue(permissions.hasPrivileges(alias)); - assertFalse(permissions.hasPrivileges(randomValueOtherThan(alias, () -> randomAlphaOfLength(5)))); + assertTrue(permissions.hasAnyPrivileges(alias)); + assertFalse(permissions.hasAnyPrivileges(randomValueOtherThan(alias, () -> randomAlphaOfLength(5)))); assertThat( - permissions.privilegeNames(alias, TransportVersion.current()), + permissions.collapseAndRemoveUnsupportedPrivileges(alias, TransportVersion.current()), arrayContaining(RemoteClusterPermissions.getSupportedRemoteClusterPermissions().toArray(new String[0])) ); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java index a4d9dacd1a63d..af5f44b5989fb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/FileRolesStoreTests.java @@ -388,7 +388,8 @@ public void testParseFileWithRemoteIndicesAndCluster() throws IllegalAccessExcep events.get(4), startsWith( "failed to parse remote_cluster for role [invalid_role_bad_priv_remote_clusters]. " - + "[monitor_enrich] is the only value allowed for [privileges] within [remote_cluster]. skipping role..." + + "[monitor_enrich, monitor_stats] are the only values allowed for [privileges] within [remote_cluster]. " + + "Found [junk]. skipping role..." 
            )
        );
    }

diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java
index e17d651a19748..5b91b774cc435 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java
@@ -213,7 +213,7 @@ public void testBuildResponse() throws Exception {
             ,"remote_cluster":[
               {
                 "privileges":[
-                  "monitor_enrich"
+                  "monitor_enrich", "monitor_stats"
                 ],
                 "clusters":[
                   "remote-1"
@@ -221,7 +221,7 @@
               },
               {
                 "privileges":[
-                  "monitor_enrich"
+                  "monitor_enrich", "monitor_stats"
                 ],
                 "clusters":[
                   "remote-2",
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml
index ef8fab9ca7b6d..d03e6925cab1f 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/privileges/11_builtin.yml
@@ -15,5 +15,5 @@ setup:
   # This is fragile - it needs to be updated every time we add a new cluster/index privilege
   # I would much prefer we could just check that specific entries are in the array, but we don't have
   # an assertion for that
-  - length: { "cluster" : 61 }
+  - length: { "cluster" : 62 }
   - length: { "index" : 22 }

From 6ab260c3a69ce3bc197873d35683bfb2fd52ccc8 Mon Sep 17 00:00:00 2001
From: Luigi Dell'Aquila
Date: Fri, 8 Nov 2024 19:45:34 +0100
Subject: [PATCH 04/95] ES|QL: Fix function Telemetry tests (#116470)

---
 muted-tests.yml                                    | 6 ------
 .../resources/rest-api-spec/test/esql/60_usage.yml | 2 +-
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 1321cdc2a3d57..718d160994103 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -112,9 +112,6 @@ tests:
 - class: org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityWithApmTracingRestIT
   method: testTracingCrossCluster
   issue: https://github.com/elastic/elasticsearch/issues/112731
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)}
-  issue: https://github.com/elastic/elasticsearch/issues/115231
 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT
   method: testInferDeploysDefaultE5
   issue: https://github.com/elastic/elasticsearch/issues/115361
@@ -279,9 +276,6 @@ tests:
 - class: org.elasticsearch.smoketest.MlWithSecurityIT
   method: test {yaml=ml/inference_crud/Test force delete given model with alias referenced by pipeline}
   issue: https://github.com/elastic/elasticsearch/issues/116443
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry) non-snapshot version}
-  issue: https://github.com/elastic/elasticsearch/issues/116448
 - class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT
   method: testILMDownsampleRollingRestart
   issue: https://github.com/elastic/elasticsearch/issues/114233
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
index bb3345f4118b9..6e7098da33805 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -162,4 +162,4 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} - - length: {esql.functions: 118} # check the "sister" test above for a likely update to the same esql.functions length check + - length: {esql.functions: 116} # check the "sister" test above for a likely update to the same esql.functions length check From 2acd164c1a2156fcc946406664583b029bcc0072 Mon Sep 17 00:00:00 2001 From: Patrick Doyle <810052+prdoyle@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:46:44 -0500 Subject: [PATCH 05/95] Use different file-based settings error message for invalid JSON and NotMasterException (#116359) * Fixup: remove unused pattern variable from before * Try1 handle XContentParseException * Mocks wrap XContentParseException in ExecutionException like the real code does * onProcessFileChangesException case for XContentParseException * Handle NotMasterException while we're at it. * Cleanup * Use Nikolaj's addFileChangedListener approach to test * Add REPLACE_EXISTING * Remove ATOMIC_MOVE Co-authored-by: Nikolaj Volgushev * Delete stray generated files * Remove unused method --------- Co-authored-by: Nikolaj Volgushev --- .../file/AbstractFileWatchingService.java | 9 ++- .../service/FileSettingsService.java | 19 ++++- .../service/FileSettingsServiceTests.java | 74 ++++++++++++++++++- 3 files changed, 92 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java b/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java index 41998bf974bf9..b7ecd671c7d62 100644 --- a/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java +++ b/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java @@ -302,11 +302,12 @@ final WatchKey enableDirectoryWatcher(WatchKey previousKey, Path settingsDir) th void processSettingsOnServiceStartAndNotifyListeners() throws InterruptedException { try { processFileOnServiceStart(); - for (var listener : eventListeners) { - listener.watchedFileChanged(); - } } catch (IOException | ExecutionException e) { - logger.error(() -> "Error processing watched file: " + watchedFile(), e); + onProcessFileChangesException(e); + return; + } + for (var listener : eventListeners) { + listener.watchedFileChanged(); } } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 601fc3c86d98f..ae9ae6f8b5bf9 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -15,12 +15,14 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.file.MasterNodeFileWatchingService; import org.elasticsearch.env.Environment; +import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.BufferedInputStream; @@ -146,11 +148,20 @@ private void processFileChanges(ReservedStateVersionCheck versionCheck) throws I @Override protected void onProcessFileChangesException(Exception e) { - if (e instanceof ExecutionException && e.getCause() instanceof FailedToCommitClusterStateException f) { - logger.error("Unable to commit cluster state", e); - } else { - super.onProcessFileChangesException(e); + if (e instanceof ExecutionException) { + var cause = e.getCause(); + if (cause instanceof FailedToCommitClusterStateException) { + logger.error("Unable to commit cluster state", e); + return; + } else if (cause instanceof XContentParseException) { + logger.error("Unable to parse settings", e); + return; + } else if (cause instanceof NotMasterException) { + logger.error("Node is no longer master", e); + return; + } } + super.onProcessFileChangesException(e); } @Override diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index aa76245c20679..0db29588c4298 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import org.junit.After; import org.junit.Before; @@ -55,16 +56,22 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.hasEntry; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -262,6 +269,68 @@ public void testProcessFileChanges() throws Exception { verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_VERSION_ONLY), any()); } + @SuppressWarnings("unchecked") + public void testInvalidJSON() throws Exception { + doAnswer((Answer) invocation -> { + invocation.getArgument(1, XContentParser.class).map(); // Throw if JSON is invalid + ((Consumer) invocation.getArgument(3)).accept(null); + return null; + }).when(controller).process(any(), any(XContentParser.class), any(), any()); + + CyclicBarrier fileChangeBarrier = new CyclicBarrier(2); + fileSettingsService.addFileChangedListener(() -> awaitOrBust(fileChangeBarrier)); + + 
Files.createDirectories(fileSettingsService.watchedFileDir()); + // contents of the JSON don't matter, we just need a file to exist + writeTestFile(fileSettingsService.watchedFile(), "{}"); + + doAnswer((Answer) invocation -> { + boolean returnedNormally = false; + try { + var result = invocation.callRealMethod(); + returnedNormally = true; + return result; + } catch (XContentParseException e) { + // We're expecting a parse error. processFileChanges specifies that this is supposed to throw ExecutionException. + throw new ExecutionException(e); + } catch (Throwable e) { + throw new AssertionError("Unexpected exception", e); + } finally { + if (returnedNormally == false) { + // Because of the exception, listeners aren't notified, so we need to activate the barrier ourselves + awaitOrBust(fileChangeBarrier); + } + } + }).when(fileSettingsService).processFileChanges(); + + // Establish the initial valid JSON + fileSettingsService.start(); + fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); + awaitOrBust(fileChangeBarrier); + + // Now break the JSON + writeTestFile(fileSettingsService.watchedFile(), "test_invalid_JSON"); + awaitOrBust(fileChangeBarrier); + + verify(fileSettingsService, times(1)).processFileOnServiceStart(); // The initial state + verify(fileSettingsService, times(1)).processFileChanges(); // The changed state + verify(fileSettingsService, times(1)).onProcessFileChangesException( + argThat(e -> e instanceof ExecutionException && e.getCause() instanceof XContentParseException) + ); + + // Note: the name "processFileOnServiceStart" is a bit misleading because it is not + // referring to fileSettingsService.start(). Rather, it is referring to the initialization + // of the watcher thread itself, which occurs asynchronously when clusterChanged is first called. + } + + private static void awaitOrBust(CyclicBarrier barrier) { + try { + barrier.await(20, TimeUnit.SECONDS); + } catch (InterruptedException | BrokenBarrierException | TimeoutException e) { + throw new AssertionError("Unexpected exception waiting for barrier", e); + } + } + @SuppressWarnings("unchecked") public void testStopWorksInMiddleOfProcessing() throws Exception { CountDownLatch processFileLatch = new CountDownLatch(1); @@ -356,10 +425,10 @@ private static void writeTestFile(Path path, String contents) throws IOException Path tempFilePath = createTempFile(); Files.writeString(tempFilePath, contents); try { - Files.move(tempFilePath, path, ATOMIC_MOVE); + Files.move(tempFilePath, path, REPLACE_EXISTING, ATOMIC_MOVE); } catch (AtomicMoveNotSupportedException e) { logger.info("Atomic move not available. 
Falling back on non-atomic move to write [{}]", path.toAbsolutePath()); - Files.move(tempFilePath, path); + Files.move(tempFilePath, path, REPLACE_EXISTING); } } @@ -374,4 +443,5 @@ private static void longAwait(CountDownLatch latch) { fail(e, "longAwait: interrupted waiting for CountDownLatch to reach zero"); } } + } From 04dbd8cfe17e04b84799009bef133c8775211421 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 9 Nov 2024 06:20:18 +1100 Subject: [PATCH 06/95] Mute org.elasticsearch.reservedstate.service.FileSettingsServiceTests testInvalidJSON #116521 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 718d160994103..b0e532bc56210 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -287,6 +287,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116484 - class: org.elasticsearch.xpack.kql.query.KqlQueryBuilderTests issue: https://github.com/elastic/elasticsearch/issues/116487 +- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests + method: testInvalidJSON + issue: https://github.com/elastic/elasticsearch/issues/116521 # Examples: # From 5a3e2135d061d884ce4ba227a8afad54f8939d8a Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Fri, 8 Nov 2024 19:53:12 +0000 Subject: [PATCH 07/95] Update deprecation message in `EnrichPolicy` (#116504) The `elasticsearch_version` property of enrich policies is deprecated with a message saying that it will be removed in ES 9.0. It's still deprecated, but it won't be removed in 9.0. So this change makes the deprecation message less specific. --- x-pack/plugin/build.gradle | 1 + .../org/elasticsearch/xpack/core/enrich/EnrichPolicy.java | 2 +- x-pack/plugin/enrich/qa/rest/build.gradle | 5 +++++ .../resources/rest-api-spec/test/enrich/10_basic.yml | 2 +- 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 193a82436f26a..1750ccbb8c0ce 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -89,5 +89,6 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.") task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.") task.skipTest("privileges/11_builtin/Test get builtin privileges" ,"unnecessary to test compatibility") + task.skipTest("enrich/10_basic/Test using the deprecated elasticsearch_version field results in a warning", "The deprecation message was changed") }) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java index 916bd3c62a598..9bbe41b4797fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java @@ -36,7 +36,7 @@ public final class EnrichPolicy implements Writeable, ToXContentFragment { private static final String ELASTICEARCH_VERSION_DEPRECATION_MESSAGE = - "the [elasticsearch_version] field of an enrich policy has no effect and will be removed in Elasticsearch 9.0"; + "the [elasticsearch_version] field of an enrich policy has no effect and will be removed in a future version of Elasticsearch"; private static final DeprecationLogger deprecationLogger = 
DeprecationLogger.getLogger(EnrichPolicy.class); diff --git a/x-pack/plugin/enrich/qa/rest/build.gradle b/x-pack/plugin/enrich/qa/rest/build.gradle index fdaddbc1f9290..064e362c77e6c 100644 --- a/x-pack/plugin/enrich/qa/rest/build.gradle +++ b/x-pack/plugin/enrich/qa/rest/build.gradle @@ -32,3 +32,8 @@ testClusters.configureEach { setting 'xpack.security.enabled', 'false' requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.4.0") } + +tasks.named("yamlRestCompatTestTransform").configure({ task -> + task.skipTest("enrich/10_basic/Test using the deprecated elasticsearch_version field results in a warning", "The deprecation message was changed") +}) + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/10_basic.yml index c9b05c4e13a85..17e5e0cfb0759 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/enrich/10_basic.yml @@ -69,7 +69,7 @@ setup: - do: warnings: - - "the [elasticsearch_version] field of an enrich policy has no effect and will be removed in Elasticsearch 9.0" + - "the [elasticsearch_version] field of an enrich policy has no effect and will be removed in a future version of Elasticsearch" enrich.put_policy: name: policy-crud-warning body: From eac2a01eec49cba455228d10df524efbe4033139 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Sat, 9 Nov 2024 06:56:46 +1100 Subject: [PATCH 08/95] Mute org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange #116523 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index b0e532bc56210..1b768222f8bae 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -290,6 +290,9 @@ tests: - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testInvalidJSON issue: https://github.com/elastic/elasticsearch/issues/116521 +- class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests + method: testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange + issue: https://github.com/elastic/elasticsearch/issues/116523 # Examples: # From 51a9863e91801d4afa5f63b3e920c231c685fa71 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 8 Nov 2024 15:15:47 -0600 Subject: [PATCH 09/95] muting RemoteClusterPermissionsTests.testCollapseAndRemoveUnsupportedPrivileges (#116524) --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 1b768222f8bae..ae40f339818a9 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -293,6 +293,9 @@ tests: - class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests method: testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange issue: https://github.com/elastic/elasticsearch/issues/116523 +- class: org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionsTests + method: testCollapseAndRemoveUnsupportedPrivileges + issue: https://github.com/elastic/elasticsearch/issues/116520 # Examples: # From e96c75568de0a8412f6e49fcd422f455cf67a91d Mon Sep 17 
00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Sat, 9 Nov 2024 17:36:10 +1100
Subject: [PATCH 10/95] Mute org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT testMatchAllQuery #116536

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index ae40f339818a9..110d0d9bd5b5c 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -296,6 +296,9 @@ tests:
 - class: org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionsTests
   method: testCollapseAndRemoveUnsupportedPrivileges
   issue: https://github.com/elastic/elasticsearch/issues/116520
+- class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT
+  method: testMatchAllQuery
+  issue: https://github.com/elastic/elasticsearch/issues/116536

 # Examples:
 #

From fae80f85a9f5e4e5ba358bad2953d33d1367e253 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Mon, 11 Nov 2024 09:12:57 +1100
Subject: [PATCH 11/95] Mute org.elasticsearch.xpack.test.rest.XPackRestIT test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} #116555

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index 110d0d9bd5b5c..1da1e370bfd12 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -299,6 +299,9 @@ tests:
 - class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT
   method: testMatchAllQuery
   issue: https://github.com/elastic/elasticsearch/issues/116536
+- class: org.elasticsearch.xpack.test.rest.XPackRestIT
+  method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline}
+  issue: https://github.com/elastic/elasticsearch/issues/116555

 # Examples:
 #

From 46b17bd550a17d6989136e96d122d182adb96644 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 11 Nov 2024 09:29:14 +0100
Subject: [PATCH 12/95] Two small improvements to IndexNameExpressionResolver (#116552)

Not using an iterator loop for the mostly single-item list saves measurable
runtime in the benchmarks for the resolver. Also, cleaned up a redundant
method argument.
---
 .../metadata/IndexNameExpressionResolver.java | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
index 39499253c8790..bf80c38d64a4e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java
@@ -402,8 +402,10 @@ Index[] concreteIndices(Context context, String... indexExpressions) {
                 resolveIndicesForDataStream(context, dataStream, concreteIndicesResult);
             }
         } else {
-            for (Index index : indexAbstraction.getIndices()) {
-                if (shouldTrackConcreteIndex(context, context.getOptions(), index)) {
+            List<Index> indices = indexAbstraction.getIndices();
+            for (int i = 0, n = indices.size(); i < n; i++) {
+                Index index = indices.get(i);
+                if (shouldTrackConcreteIndex(context, index)) {
                     concreteIndicesResult.add(index);
                 }
             }
@@ -421,7 +423,7 @@ Index[] concreteIndices(Context context, String...
indexExpressions) { private static void resolveIndicesForDataStream(Context context, DataStream dataStream, Set concreteIndicesResult) { if (shouldIncludeRegularIndices(context.getOptions())) { for (Index index : dataStream.getIndices()) { - if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + if (shouldTrackConcreteIndex(context, index)) { concreteIndicesResult.add(index); } } @@ -430,7 +432,7 @@ private static void resolveIndicesForDataStream(Context context, DataStream data // We short-circuit here, if failure indices are not allowed and they can be skipped if (context.getOptions().allowFailureIndices() || context.getOptions().ignoreUnavailable() == false) { for (Index index : dataStream.getFailureIndices().getIndices()) { - if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + if (shouldTrackConcreteIndex(context, index)) { concreteIndicesResult.add(index); } } @@ -565,7 +567,7 @@ private static IndexNotFoundException notFoundException(String... indexExpressio return infe; } - private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions options, Index index) { + private static boolean shouldTrackConcreteIndex(Context context, Index index) { if (context.systemIndexAccessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY && context.netNewSystemIndexPredicate.test(index.getName())) { // Exclude this one as it's a net-new system index, and we explicitly don't want those. @@ -575,7 +577,7 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions DataStream parentDataStream = context.getState().metadata().getIndicesLookup().get(index.getName()).getParentDataStream(); if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { if (parentDataStream.isFailureStoreIndex(index.getName())) { - if (options.ignoreUnavailable()) { + if (context.options.ignoreUnavailable()) { return false; } else { throw new FailureIndexNotSupportedException(index); @@ -585,6 +587,7 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions } final IndexMetadata imd = context.state.metadata().index(index); if (imd.getState() == IndexMetadata.State.CLOSE) { + IndicesOptions options = context.options; if (options.forbidClosedIndices() && options.ignoreUnavailable() == false) { throw new IndexClosedException(index); } else { From 14944f2d3bc9868ce175424a3dc89b5fb136a6ad Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:14:10 +1100 Subject: [PATCH 13/95] Mute org.elasticsearch.search.basic.SearchWithRandomIOExceptionsIT testRandomDirectoryIOExceptions #114824 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 1da1e370bfd12..a00fecc253a7b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -302,6 +302,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/116555 +- class: org.elasticsearch.search.basic.SearchWithRandomIOExceptionsIT + method: testRandomDirectoryIOExceptions + issue: https://github.com/elastic/elasticsearch/issues/114824 # Examples: # From ddd606d360c03ef6741b235c452f1d6952ee2b9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Slobodan=20Adamovi=C4=87?= Date: Mon, 11 Nov 2024 10:42:49 +0100 Subject: [PATCH 14/95] Unmute SecurityWithBasicLicenseIT (#116300) Test failure issue got closed 
without actually unmuting this test. Locally, this test passes. Relates https://github.com/elastic/elasticsearch/issues/99169 --- .../elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java index 3dd678046ea5f..324850f158268 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityWithBasicLicenseIT.java @@ -29,7 +29,6 @@ public class SecurityWithBasicLicenseIT extends SecurityInBasicRestTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99169") public void testWithBasicLicense() throws Exception { checkLicenseType("basic"); checkSecurityEnabled(false); From 09146792256211b47549e162af5177309b4c3107 Mon Sep 17 00:00:00 2001 From: Jack Pan <35284546+jackpan123@users.noreply.github.com> Date: Mon, 11 Nov 2024 18:10:05 +0800 Subject: [PATCH 15/95] Remove trailing semicolon in REPEAT function example (#116218) Remove trailing semicolon in REPEAT function example (Closes #116156 ) --- docs/reference/esql/functions/kibana/definition/repeat.json | 2 +- docs/reference/esql/functions/kibana/docs/repeat.md | 2 +- .../esql/qa/testFixtures/src/main/resources/string.csv-spec | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/reference/esql/functions/kibana/definition/repeat.json b/docs/reference/esql/functions/kibana/definition/repeat.json index b8660b6362e30..201484cf7aa6f 100644 --- a/docs/reference/esql/functions/kibana/definition/repeat.json +++ b/docs/reference/esql/functions/kibana/definition/repeat.json @@ -42,7 +42,7 @@ } ], "examples" : [ - "ROW a = \"Hello!\"\n| EVAL triple_a = REPEAT(a, 3);" + "ROW a = \"Hello!\"\n| EVAL triple_a = REPEAT(a, 3)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/docs/repeat.md b/docs/reference/esql/functions/kibana/docs/repeat.md index cc46e8282d9fe..4949d86a28f46 100644 --- a/docs/reference/esql/functions/kibana/docs/repeat.md +++ b/docs/reference/esql/functions/kibana/docs/repeat.md @@ -7,5 +7,5 @@ Returns a string constructed by concatenating `string` with itself the specified ``` ROW a = "Hello!" -| EVAL triple_a = REPEAT(a, 3); +| EVAL triple_a = REPEAT(a, 3) ``` diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index de5981df999c7..963245f9f0ea6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1655,8 +1655,9 @@ repeat required_capability: repeat // tag::repeat[] ROW a = "Hello!" -| EVAL triple_a = REPEAT(a, 3); +| EVAL triple_a = REPEAT(a, 3) // end::repeat[] +; // tag::repeat-result[] a:keyword | triple_a:keyword From 3f6fda631b0ae032a61bd91e2c7587c55848ada4 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 11 Nov 2024 11:55:19 +0100 Subject: [PATCH 16/95] Fix/cleanup two spots in open-PIT request handling (#116553) Two things fixed here: 1. Don't fork just to send the response, it's unnecessary. 
Serializing the ID might take a little time but if it's really an issue we should optimize it rather than forking just to send a single response. 2. Handle finding a connection cleanly, don't allow the exception to bubble up and fail the phase, this may cause leaks. --- .../TransportOpenPointInTimeAction.java | 38 +++++-------------- 1 file changed, 9 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 010f96f212116..eee65134eae33 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; @@ -257,16 +256,17 @@ protected void executePhaseOnShard( SearchShardTarget shard, SearchActionListener phaseListener ) { - final ShardOpenReaderRequest shardRequest = new ShardOpenReaderRequest( - shardIt.shardId(), - shardIt.getOriginalIndices(), - pitRequest.keepAlive() - ); - Transport.Connection connection = connectionLookup.apply(shardIt.getClusterAlias(), shard.getNodeId()); + final Transport.Connection connection; + try { + connection = connectionLookup.apply(shardIt.getClusterAlias(), shard.getNodeId()); + } catch (Exception e) { + phaseListener.onFailure(e); + return; + } transportService.sendChildRequest( connection, OPEN_SHARD_READER_CONTEXT_NAME, - shardRequest, + new ShardOpenReaderRequest(shardIt.shardId(), shardIt.getOriginalIndices(), pitRequest.keepAlive()), task, new ActionListenerResponseHandler<>( phaseListener, @@ -279,29 +279,9 @@ protected void executePhaseOnShard( @Override protected SearchPhase getNextPhase() { return new SearchPhase(getName()) { - - private void onExecuteFailure(Exception e) { - onPhaseFailure(this, "sending response failed", e); - } - @Override public void run() { - execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - onExecuteFailure(e); - } - - @Override - protected void doRun() { - sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); - } - - @Override - public boolean isForceExecution() { - return true; // we already created the PIT, no sense in rejecting the task that sends the response. - } - }); + sendSearchResponse(SearchResponseSections.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); } }; } From 60de8ed05c6ff3a46fa7f0be5b32f2b0706adafe Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 11 Nov 2024 11:55:52 +0100 Subject: [PATCH 17/95] Cleanup IndexActionIT (#116554) We can use the hit count assertion here, no need to be tricky. Also, this can be a single loop nowadays, the two loops are a leftover from when this was testing with types. 
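
In rough terms, each verification iteration collapses to a single assertion. The following is a sketch only; the exact change is in the diff below, and it relies on the existing `assertHitCount` helper from `ElasticsearchAssertions`:

```
int numOfChecks = randomIntBetween(16, 24);
for (int j = 0; j < numOfChecks; j++) {
    // assertHitCount fails with a descriptive message when the total hit count differs
    assertHitCount(prepareSearch("test"), numOfDocs);
}
```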
--- .../elasticsearch/indexing/IndexActionIT.java | 65 +++---------------- 1 file changed, 9 insertions(+), 56 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java index 37fbc95d56506..84abb57b7821e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indexing/IndexActionIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.util.ArrayList; import java.util.List; @@ -28,7 +27,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicIntegerArray; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -42,7 +41,6 @@ public class IndexActionIT extends ESIntegTestCase { public void testAutoGenerateIdNoDuplicates() throws Exception { int numberOfIterations = scaledRandomIntBetween(10, 50); for (int i = 0; i < numberOfIterations; i++) { - Exception firstError = null; createIndex("test"); int numOfDocs = randomIntBetween(10, 100); logger.info("indexing [{}] docs", numOfDocs); @@ -52,51 +50,9 @@ public void testAutoGenerateIdNoDuplicates() throws Exception { } indexRandom(true, builders); logger.info("verifying indexed content"); - int numOfChecks = randomIntBetween(8, 12); + int numOfChecks = randomIntBetween(16, 24); for (int j = 0; j < numOfChecks; j++) { - try { - logger.debug("running search with all types"); - assertResponse(prepareSearch("test"), response -> { - if (response.getHits().getTotalHits().value() != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value() - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. search response: \n{}", message, response); - fail(message); - } - }); - } catch (Exception e) { - logger.error("search for all docs types failed", e); - if (firstError == null) { - firstError = e; - } - } - try { - logger.debug("running search with a specific type"); - assertResponse(prepareSearch("test"), response -> { - if (response.getHits().getTotalHits().value() != numOfDocs) { - final String message = "Count is " - + response.getHits().getTotalHits().value() - + " but " - + numOfDocs - + " was expected. " - + ElasticsearchAssertions.formatShardStatus(response); - logger.error("{}. 
search response: \n{}", message, response);
-                        fail(message);
-                    }
-                });
-            } catch (Exception e) {
-                logger.error("search for all docs of a specific type failed", e);
-                if (firstError == null) {
-                    firstError = e;
-                }
-            }
-        }
-        if (firstError != null) {
-            fail(firstError.getMessage());
+                assertHitCount(prepareSearch("test"), numOfDocs);
             }
             internalCluster().wipeIndices("test");
         }
@@ -147,16 +103,13 @@ public void testCreatedFlagParallelExecution() throws Exception {
         List<Callable<Void>> tasks = new ArrayList<>(taskCount);
         final Random random = random();
         for (int i = 0; i < taskCount; i++) {
-            tasks.add(new Callable<Void>() {
-                @Override
-                public Void call() throws Exception {
-                    int docId = random.nextInt(docCount);
-                    DocWriteResponse indexResponse = indexDoc("test", Integer.toString(docId), "field1", "value");
-                    if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
-                        createdCounts.incrementAndGet(docId);
-                    }
-                    return null;
+            tasks.add(() -> {
+                int docId = random.nextInt(docCount);
+                DocWriteResponse indexResponse = indexDoc("test", Integer.toString(docId), "field1", "value");
+                if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
+                    createdCounts.incrementAndGet(docId);
                 }
+                return null;
             });
         }

From 64c362b154534371a8f90dd8cd42cbba71a7f825 Mon Sep 17 00:00:00 2001
From: Panagiotis Bailis
Date: Mon, 11 Nov 2024 13:19:20 +0200
Subject: [PATCH 18/95] Adding more retriever examples to documentation (#116196)

---
 .../retrievers-examples.asciidoc         | 428 ++++++++++++++++++
 .../retrievers-overview.asciidoc         | 121 +++--
 .../search-your-data/search-api.asciidoc |   1 -
 .../search-your-data.asciidoc            |   1 +
 4 files changed, 504 insertions(+), 47 deletions(-)
 create mode 100644 docs/reference/search/search-your-data/retrievers-examples.asciidoc

diff --git a/docs/reference/search/search-your-data/retrievers-examples.asciidoc b/docs/reference/search/search-your-data/retrievers-examples.asciidoc
new file mode 100644
index 0000000000000..8cd1a4bf5ce98
--- /dev/null
+++ b/docs/reference/search/search-your-data/retrievers-examples.asciidoc
@@ -0,0 +1,428 @@
+[[retrievers-examples]]
+=== Retrievers examples
+
+Learn how to combine different retrievers in these hands-on examples.
+To demonstrate the full functionality of retrievers, these examples require access to a <> set up using the <>.
+
+[discrete]
+[[retrievers-examples-setup]]
+==== Add example data
+
+To begin with, we'll set up the necessary services and have them in place for later use.
+
+[source,js]
+----
+// Setup rerank task stored as `my-rerank-model`
+PUT _inference/rerank/my-rerank-model
+{
+  "service": "cohere",
+  "service_settings": {
+    "model_id": "rerank-english-v3.0",
+    "api_key": "{{COHERE_API_KEY}}"
+  }
+}
+----
+//NOTCONSOLE
+
+Now that we have our reranking service in place, let's create the `retrievers_example` index, and add some documents to it.
+[source,js] +---- +PUT retrievers_example +{ + "mappings": { + "properties": { + "vector": { + "type": "dense_vector", + "dims": 3, + "similarity": "l2_norm", + "index": true + }, + "text": { + "type": "text" + }, + "year": { + "type": "integer" + }, + "topic": { + "type": "keyword" + } + } + } +} +---- +//NOTCONSOLE + +[source,js] +---- +POST /retrievers_example/_doc/1 +{ + "vector": [0.23, 0.67, 0.89], + "text": "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.", + "year": 2024, + "topic": ["llm", "ai", "information_retrieval"] +} + +POST /retrievers_example/_doc/2 +{ + "vector": [0.12, 0.56, 0.78], + "text": "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.", + "year": 2023, + "topic": ["ai", "medicine"] +} + +POST /retrievers_example/_doc/3 +{ + "vector": [0.45, 0.32, 0.91], + "text": "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.", + "year": 2024, + "topic": ["ai", "security"] +} + +POST /retrievers_example/_doc/4 +{ + "vector": [0.34, 0.21, 0.98], + "text": "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.", + "year": 2023, + "topic": ["ai", "elastic", "assistant"] +} + +POST /retrievers_example/_doc/5 +{ + "vector": [0.11, 0.65, 0.47], + "text": "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.", + "year": 2024, + "topic": ["documentation", "observability", "elastic"] +} + +---- +//NOTCONSOLE + +Now that we also have our documents in place, let's try to run some queries using retrievers. + +[discrete] +[[retrievers-examples-combining-standard-knn-retrievers-with-rrf]] +==== Example: Combining query and kNN with RRF + +First, let's examine how to combine two different types of queries: a `kNN` query and a +`query_string` query. While these queries may produce scores in different ranges, we can use +Reciprocal Rank Fusion (`rrf`) to combine the results and generate a merged final result +list. + +To implement this in the retriever framework, we start with the top-level element: our `rrf` +retriever. This retriever operates on top of two other retrievers: a `knn` retriever and a +`standard` retriever. Our query structure would look like this: + +[source,js] +---- +GET /retrievers_example/_search +{ + "retriever":{ + "rrf": { + "retrievers":[ + { + "standard":{ + "query":{ + "query_string":{ + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "_source": ["text", "topic"] +} +---- +//NOTCONSOLE + +[discrete] +[[retrievers-examples-collapsing-retriever-results]] +==== Example: Grouping results by year with `collapse` + +In our result set, we have many documents with the same `year` value. We can clean this +up using the `collapse` parameter with our retriever. 
This enables grouping results by +any field and returns only the highest-scoring document from each group. In this example +we'll collapse our results based on the `year` field. + +[source,js] +---- +GET /retrievers_example/_search +{ + "retriever":{ + "rrf": { + "retrievers":[ + { + "standard":{ + "query":{ + "query_string":{ + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "collapse": { + "field": "year", + "inner_hits": { + "name": "topic related documents", + "_source": ["text", "year"] + } + }, + "_source": ["text", "topic"] +} +---- +//NOTCONSOLE + +[discrete] +[[retrievers-examples-text-similarity-reranker-on-top-of-rrf]] +==== Example: Rerank results of an RRF retriever + +Previously, we used a `text_similarity_reranker` retriever within an `rrf` retriever. +Because retrievers support full composability, we can also rerank the results of an +`rrf` retriever. Let's apply this to our first example. + +[source,js] +---- +GET retrievers_example/_search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "rrf": { + "retrievers": [ + { + "standard":{ + "query":{ + "query_string":{ + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "What are the state of the art applications of AI in information retrieval?" + } + }, + "_source": ["text", "topic"] +} + +---- +//NOTCONSOLE + +[discrete] +[[retrievers-examples-rrf-ranking-on-text-similarity-reranker-results]] +==== Example: RRF with semantic reranker + +For this example, we'll replace our semantic query with the `my-rerank-model` +reranker we previously configured. Since this is a reranker, it needs an initial pool of +documents to work with. In this case, we'll filter for documents about `ai` topics. + +[source,js] +---- +GET /retrievers_example/_search +{ + "retriever": { + "rrf": { + "retrievers": [ + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + { + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "term": { + "topic": "ai" + } + } + } + }, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "Can I use generative AI to identify user intent and improve search relevance?" + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "_source": [ + "text", + "topic" + ] +} +---- +//NOTCONSOLE + +[discrete] +[[retrievers-examples-chaining-text-similarity-reranker-retrievers]] +==== Example: Chaining multiple semantic rerankers + +Full composability means we can chain together multiple retrievers of the same type. For instance, imagine we have a computationally expensive reranker that's specialized for AI content. We can rerank the results of a `text_similarity_reranker` using another `text_similarity_reranker` retriever. Each reranker can operate on different fields and/or use different inference services. 
+ +[source,js] +---- +GET retrievers_example/_search +{ + "retriever": { + "text_similarity_reranker": { + "retriever": { + "text_similarity_reranker": { + "retriever": { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + "rank_window_size": 100, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "What are the state of the art applications of AI in information retrieval?" + } + }, + "rank_window_size": 10, + "field": "text", + "inference_id": "my-other-more-expensive-rerank-model", + "inference_text": "Applications of Large Language Models in technology and their impact on user satisfaction" + } + }, + "_source": [ + "text", + "topic" + ] +} +---- +//NOTCONSOLE + + +Note that our example applies two reranking steps. First, we rerank the top 100 +documents from the `knn` search using the `my-rerank-model` reranker. Then we +pick the top 10 results and rerank them using the more fine-grained +`my-other-more-expensive-rerank-model`. + +[discrete] +[[retrievers-examples-rrf-and-aggregations]] +==== Example: Combine RRF with aggregations + +Retrievers support both composability and most of the standard `_search` functionality. For instance, +we can compute aggregations with the `rrf` retriever. When using a compound retriever, +the aggregations are computed based on its nested retrievers. In the following example, +the `terms` aggregation for the `topic` field will include all results, not just the top `rank_window_size`, +from the 2 nested retrievers, i.e. all documents whose `year` field is greater than 2023, and whose `topic` field +matches the term `elastic`. + +[source,js] +---- +GET retrievers_example/_search +{ + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "range": { + "year": { + "gt": 2023 + } + } + } + } + }, + { + "standard": { + "query": { + "term": { + "topic": "elastic" + } + } + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "_source": [ + "text", + "topic" + ], + "aggs": { + "topics": { + "terms": { + "field": "topic" + } + } + } +} +---- +//NOTCONSOLE diff --git a/docs/reference/search/search-your-data/retrievers-overview.asciidoc b/docs/reference/search/search-your-data/retrievers-overview.asciidoc index 8e5955fc41782..1771b5bb0d849 100644 --- a/docs/reference/search/search-your-data/retrievers-overview.asciidoc +++ b/docs/reference/search/search-your-data/retrievers-overview.asciidoc @@ -1,5 +1,5 @@ [[retrievers-overview]] -=== Retrievers +== Retrievers A retriever is an abstraction that was added to the Search API in *8.14.0* and was made generally available in *8.16.0*. This abstraction enables the configuration of multi-stage retrieval pipelines within a single `_search` call. @@ -11,7 +11,7 @@ For implementation details, including notable restrictions, check out the [discrete] [[retrievers-overview-types]] -==== Retriever types +=== Retriever types Retrievers come in various types, each tailored for different search operations. The following retrievers are currently available: @@ -34,7 +34,8 @@ Used for <>. Requires first creating a `rerank` task using the <>. [discrete] -==== What makes retrievers useful? +[[retrievers-overview-why-are-they-useful]] +=== What makes retrievers useful? Here's an overview of what makes retrievers useful and how they differ from regular queries. 
@@ -66,65 +67,90 @@ When using compound retrievers, only the query element is allowed, which enforce
 [discrete]
 [[retrievers-overview-example]]
-==== Example
+=== Example
 
-The following example demonstrates the powerful queries that we can now compose, and how retrievers simplify this process.
-We can use any combination of retrievers we want, propagating the results of a nested retriever to its parent.
-In this scenario, we'll make use of 4 of our currently available retrievers, i.e. `standard`, `knn`, `text_similarity_reranker` and `rrf`.
-See <> for the complete list of available retrievers.
-
-We'll first combine the results of a `semantic` query using the `standard` retriever, and that of a `knn` search on a dense vector field, using `rrf` to get the top 100 results.
-Finally, we'll then rerank the top-50 results of `rrf` using the `text_similarity_reranker`
+The following example demonstrates how using retrievers simplifies the composability of queries for RRF ranking.
 
 [source,js]
 ----
 GET example-index/_search
 {
   "retriever": {
-    "text_similarity_reranker": {
-      "retriever": {
-        "rrf": {
-          "retrievers": [
-            {
-              "standard": {
-                "query": {
-                  "semantic": {
-                    "field": "inference_field",
-                    "query": "state of the art vector database"
-                  }
-                }
-              }
-            },
-            {
-              "knn": {
-                "query_vector": [
-                  0.54,
-                  ...,
-                  0.245
-                ],
-                "field": "embedding",
-                "k": 10,
-                "num_candidates": 15
+    "rrf": {
+      "retrievers": [
+        {
+          "standard": {
+            "query": {
+              "sparse_vector": {
+                "field": "vector.tokens",
+                "inference_id": "my-elser-endpoint",
+                "query": "What blue shoes are on sale?"
+              }
+            }
+          }
+        },
+        {
+          "standard": {
+            "query": {
+              "match": {
+                "text": "blue shoes sale"
               }
             }
-          ],
-          "rank_window_size": 100,
-          "rank_constant": 10
+          }
         }
-      },
-      "rank_window_size": 50,
-      "field": "description",
-      "inference_text": "what's the best way to create complex pipelines and retrieve documents?",
-      "inference_id": "my-awesome-rerank-model"
+      ]
     }
   }
 }
 ----
 //NOTCONSOLE
 
+This example demonstrates how you can combine different retrieval strategies into a single `retriever` pipeline.
+
+Compare this to the `RRF` with `sub_searches` approach (which is deprecated as of 8.16.0):
+
+.*Expand* for example
+[%collapsible]
+==============
+
+[source,js]
+----
+GET example-index/_search
+{
+  "sub_searches":[
+    {
+      "query":{
+        "match":{
+          "text":"blue shoes sale"
+        }
+      }
+    },
+    {
+      "query":{
+        "sparse_vector": {
+          "field": "vector.tokens",
+          "inference_id": "my-elser-endpoint",
+          "query": "What blue shoes are on sale?"
+        }
+      }
+    }
+  ],
+  "rank":{
+    "rrf":{
+      "rank_window_size":50,
+      "rank_constant":20
+    }
+  }
+}
+----
+//NOTCONSOLE
+==============
+
+For more examples on how to use retrievers, please refer to <>.
+
 [discrete]
 [[retrievers-overview-glossary]]
-==== Glossary
+=== Glossary
 
 Here are some important terms:
 
@@ -143,7 +169,7 @@ Special compound retrievers that reorder hits and may adjust the number of hits,
 
 [discrete]
 [[retrievers-overview-play-in-search]]
-==== Retrievers in action
+=== Retrievers in action
 
 The Search Playground builds Elasticsearch queries using the retriever abstraction.
 It automatically detects the fields and types in your index and builds a retriever tree based on your selections.
 
@@ -154,6 +180,9 @@ Refer to the {kibana-ref}/playground.html[Playground documentation] for more inf
 
 [discrete]
 [[retrievers-overview-api-reference]]
-==== API reference
+=== API reference
 
 For implementation details, including notable restrictions, check out the
 <> in the Search API docs.
+ + +include::retrievers-examples.asciidoc[] diff --git a/docs/reference/search/search-your-data/search-api.asciidoc b/docs/reference/search/search-your-data/search-api.asciidoc index 13cea537ea4fb..a9e74d54dd9d9 100644 --- a/docs/reference/search/search-your-data/search-api.asciidoc +++ b/docs/reference/search/search-your-data/search-api.asciidoc @@ -530,5 +530,4 @@ include::retrieve-inner-hits.asciidoc[] include::search-shard-routing.asciidoc[] include::search-using-query-rules.asciidoc[] include::search-template.asciidoc[] -include::retrievers-overview.asciidoc[] diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc index cd2b418a7e79b..82541412db4bd 100644 --- a/docs/reference/search/search-your-data/search-your-data.asciidoc +++ b/docs/reference/search/search-your-data/search-your-data.asciidoc @@ -43,6 +43,7 @@ DSL, with a simplified user experience. Create search applications based on your results directly in the Kibana Search UI. include::search-api.asciidoc[] +include::retrievers-overview.asciidoc[] include::knn-search.asciidoc[] include::semantic-search.asciidoc[] include::search-across-clusters.asciidoc[] From 9e087921e5c2c4dfb87504eb95698ea25cfd796e Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Mon, 11 Nov 2024 12:24:45 +0000 Subject: [PATCH 19/95] Reenable compat tests of enrich policy deprecation (#116581) In https://github.com/elastic/elasticsearch/pull/116504, we changed a deprecation message, and although this didn't break BWC because deprecation messages aren't part of the API contract, it did break the compat tests which asserted on the message. We therefore suppressed the compat tests in question. In https://github.com/elastic/elasticsearch/pull/116522, we backported that change to the `8.x` branch. So the compat tests on `main` are now asserting the correct message, and so pass, and can be reenabled. 
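
For reference, the temporary suppression that is being removed looked like this in the build scripts (the exact lines are visible in the diff below):

```
tasks.named("yamlRestCompatTestTransform").configure({ task ->
  // skipTest(path, reason) excludes a single REST compat test until the incompatibility is resolved
  task.skipTest("enrich/10_basic/Test using the deprecated elasticsearch_version field results in a warning", "The deprecation message was changed")
})
```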
---
 x-pack/plugin/build.gradle                | 1 -
 x-pack/plugin/enrich/qa/rest/build.gradle | 4 ----
 2 files changed, 5 deletions(-)

diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle
index 1750ccbb8c0ce..193a82436f26a 100644
--- a/x-pack/plugin/build.gradle
+++ b/x-pack/plugin/build.gradle
@@ -89,6 +89,5 @@ tasks.named("yamlRestCompatTestTransform").configure({ task ->
   task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.")
   task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.")
   task.skipTest("privileges/11_builtin/Test get builtin privileges" ,"unnecessary to test compatibility")
-  task.skipTest("enrich/10_basic/Test using the deprecated elasticsearch_version field results in a warning", "The deprecation message was changed")
 })

diff --git a/x-pack/plugin/enrich/qa/rest/build.gradle b/x-pack/plugin/enrich/qa/rest/build.gradle
index 064e362c77e6c..f96eff5f933c4 100644
--- a/x-pack/plugin/enrich/qa/rest/build.gradle
+++ b/x-pack/plugin/enrich/qa/rest/build.gradle
@@ -33,7 +33,3 @@ testClusters.configureEach {
   requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.4.0")
 }

-tasks.named("yamlRestCompatTestTransform").configure({ task ->
-  task.skipTest("enrich/10_basic/Test using the deprecated elasticsearch_version field results in a warning", "The deprecation message was changed")
-})
-

From 91559da015abc4fd5851eb768d3af8884efa9c7c Mon Sep 17 00:00:00 2001
From: Nikolaj Volgushev
Date: Mon, 11 Nov 2024 13:37:23 +0100
Subject: [PATCH 20/95] Use retry logic and real file system in file settings ITs (#116392)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Several file-settings ITs fail (rarely) with exceptions like:

```
java.nio.file.AccessDeniedException: C:\Users\jenkins\workspace\platform-support\14\server\build\testrun\internalClusterTest\temp\org.elasticsearch.reservedstate.service.SnaphotsAndFileSettingsIT_5733F2A737542BE-001\tempFile-001.tmp -> C:\Users\jenkins\workspace\platform-support\14\server\build\testrun\internalClusterTest\temp\org.elasticsearch.reservedstate.service.SnaphotsAndFileSettingsIT_5733F2A737542BE-001\tempDir-002\config\operator\settings.json
  at sun.nio.fs.WindowsException.translateToIOException(WindowsException.java:89)
  at sun.nio.fs.WindowsException.rethrowAsIOException(WindowsException.java:103)
  at sun.nio.fs.WindowsFileCopy.move(WindowsFileCopy.java:317)
  at sun.nio.fs.WindowsFileSystemProvider.move(WindowsFileSystemProvider.java:293)
  at org.apache.lucene.tests.mockfile.FilterFileSystemProvider.move(FilterFileSystemProvider.java:144)
  at org.apache.lucene.tests.mockfile.FilterFileSystemProvider.move(FilterFileSystemProvider.java:144)
  at org.apache.lucene.tests.mockfile.FilterFileSystemProvider.move(FilterFileSystemProvider.java:144)
  at org.apache.lucene.tests.mockfile.FilterFileSystemProvider.move(FilterFileSystemProvider.java:144)
  at java.nio.file.Files.move(Files.java:1430)
  at org.elasticsearch.reservedstate.service.SnaphotsAndFileSettingsIT.writeJSONFile(SnaphotsAndFileSettingsIT.java:86)
  at org.elasticsearch.reservedstate.service.SnaphotsAndFileSettingsIT.testRestoreWithPersistedFileSettings(SnaphotsAndFileSettingsIT.java:321)
```

This happens on Windows file systems, due to a race condition where the
file settings service is reading the settings file concurrently with the
test trying to modify it (a no-go on Windows). It turns out we have
already addressed this with a retry for one test suite
(https://github.com/elastic/elasticsearch/pull/91863), plus addressed a
related issue around mock Windows file systems misbehaving
(https://github.com/elastic/elasticsearch/pull/92653). This PR extends
the above fixes to all file-settings related ITs.
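Conceptually, the retry being consolidated here looks like the following minimal sketch. This is a simplified, standalone form for illustration only; the actual helper lives in FileSettingsServiceIT and uses the test framework's `Randomness` and logger rather than a plain `Random`:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Random;

final class AtomicMoveWithRetry {
    private static final Random JITTER = new Random();

    // Atomically replace `target` with `source`. On Windows the watcher may
    // briefly hold the target open, so the move can fail transiently.
    static void moveWithRetry(Path source, Path target) throws IOException, InterruptedException {
        for (int retryCount = 0; ; retryCount++) {
            try {
                Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
                return;
            } catch (IOException e) {
                if (retryCount == 4) { // give up after 5 attempts
                    throw e;
                }
                // exponential backoff (100ms, 200ms, 400ms, 800ms) plus jitter
                Thread.sleep(100L * (1 << retryCount) + JITTER.nextInt(10));
            }
        }
    }
}
```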
---
 .../ComponentTemplatesFileSettingsIT.java     | 16 +----
 .../service/FileSettingsServiceIT.java        | 67 ++++++++++--------
 .../service/RepositoriesFileSettingsIT.java   | 16 +----
 .../service/SnapshotsAndFileSettingsIT.java   | 34 +--------
 .../RoleMappingFileSettingsIT.java            | 69 +++++++++++--------
 .../FileSettingsRoleMappingsRestartIT.java    | 13 ++--
 ...eanupRoleMappingDuplicatesMigrationIT.java | 14 ++--
 7 files changed, 101 insertions(+), 128 deletions(-)

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java
index 45e370a2e2252..8e0dee2396411 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.reservedstate.service;

+import org.apache.lucene.tests.util.LuceneTestCase;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction;
@@ -26,16 +27,12 @@
 import org.elasticsearch.cluster.metadata.ReservedStateMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.xcontent.XContentParserConfiguration;

 import java.io.ByteArrayInputStream;
 import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -54,6 +51,7 @@ import static org.hamcrest.Matchers.notNullValue;

 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false)
+@LuceneTestCase.SuppressFileSystems("*")
 public class ComponentTemplatesFileSettingsIT extends ESIntegTestCase {

     private static AtomicLong versionCounter = new AtomicLong(1);
@@ -365,15 +363,7 @@ private void assertMasterNode(Client client, String node) throws ExecutionExcept
     }

     private void writeJSONFile(String node, String json) throws Exception {
-        long version = versionCounter.incrementAndGet();
-
-        FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node);
-
-        Files.createDirectories(fileSettingsService.watchedFileDir());
-        Path tempFilePath = createTempFile();
-
-        Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8));
-        Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE);
+        FileSettingsServiceIT.writeJSONFile(node, json, logger, versionCounter.incrementAndGet());
     }

     private Tuple setupClusterStateListener(String node) {
diff --git
a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index f9122ccfb4a3e..90326abb381d0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -10,6 +10,7 @@ package org.elasticsearch.reservedstate.service; import org.apache.logging.log4j.Logger; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -20,6 +21,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; @@ -27,7 +29,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.junit.Before; -import java.nio.charset.StandardCharsets; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; @@ -50,6 +52,7 @@ import static org.hamcrest.Matchers.nullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +@LuceneTestCase.SuppressFileSystems("*") public class FileSettingsServiceIT extends ESIntegTestCase { private final AtomicLong versionCounter = new AtomicLong(1); @@ -129,29 +132,37 @@ private void assertMasterNode(Client client, String node) { ); } - public static void writeJSONFile(String node, String json, AtomicLong versionCounter, Logger logger, boolean incrementVersion) - throws Exception { - long version = incrementVersion ? 
versionCounter.incrementAndGet() : versionCounter.get(); - + public static void writeJSONFile(String node, String json, Logger logger, Long version) throws Exception { FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); Files.createDirectories(fileSettingsService.watchedFileDir()); Path tempFilePath = createTempFile(); - String settingsFileContent = Strings.format(json, version); - Files.write(tempFilePath, settingsFileContent.getBytes(StandardCharsets.UTF_8)); - logger.info("--> Before writing new settings file with version [{}]", version); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); - logger.info("--> After writing new settings file: [{}]", settingsFileContent); - } - - public static void writeJSONFile(String node, String json, AtomicLong versionCounter, Logger logger) throws Exception { - writeJSONFile(node, json, versionCounter, logger, true); + String jsonWithVersion = Strings.format(json, version); + logger.info("--> before writing JSON config to node {} with path {}", node, tempFilePath); + logger.info(jsonWithVersion); + + Files.writeString(tempFilePath, jsonWithVersion); + int retryCount = 0; + do { + try { + // this can fail on Windows because of timing + Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + logger.info("--> after writing JSON config to node {} with path {}", node, tempFilePath); + return; + } catch (IOException e) { + logger.info("--> retrying writing a settings file [{}]", retryCount); + if (retryCount == 4) { // retry 5 times + throw e; + } + Thread.sleep(retryDelay(retryCount)); + retryCount++; + } + } while (true); } - public static void writeJSONFileWithoutVersionIncrement(String node, String json, AtomicLong versionCounter, Logger logger) - throws Exception { - writeJSONFile(node, json, versionCounter, logger, false); + private static long retryDelay(int retryCount) { + return 100 * (1 << retryCount) + Randomness.get().nextInt(10); } private Tuple setupCleanupClusterStateListener(String node) { @@ -245,7 +256,7 @@ public void testSettingsApplied() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); } @@ -260,7 +271,7 @@ public void testSettingsAppliedOnStart() throws Exception { // In internal cluster tests, the nodes share the config directory, so when we write with the data node path // the master will pick it up on start - writeJSONFile(dataNode, testJSON, versionCounter, logger); + writeJSONFile(dataNode, testJSON, logger, versionCounter.incrementAndGet()); logger.info("--> start master node"); final String masterNode = internalCluster().startMasterOnlyNode(); @@ -288,7 +299,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { assertBusy(() -> assertTrue(masterFileSettingsService.watching())); logger.info("--> write some settings"); - writeJSONFile(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); logger.info("--> restart master"); @@ -366,7 +377,7 @@ public void testErrorSaved() throws Exception { 
assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); assertClusterStateNotSaved(savedClusterState.v1(), savedClusterState.v2()); } @@ -390,14 +401,14 @@ public void testErrorCanRecoverOnRestart() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); AtomicLong metadataVersion = savedClusterState.v2(); assertClusterStateNotSaved(savedClusterState.v1(), metadataVersion); assertHasErrors(metadataVersion, "not_cluster_settings"); // write valid json without version increment to simulate ES being able to process settings after a restart (usually, this would be // due to a code change) - writeJSONFileWithoutVersionIncrement(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.get()); internalCluster().restartNode(masterNode); ensureGreen(); @@ -426,14 +437,14 @@ public void testNewErrorOnRestartReprocessing() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - writeJSONFile(masterNode, testErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testErrorJSON, logger, versionCounter.incrementAndGet()); AtomicLong metadataVersion = savedClusterState.v2(); assertClusterStateNotSaved(savedClusterState.v1(), metadataVersion); assertHasErrors(metadataVersion, "not_cluster_settings"); // write json with new error without version increment to simulate ES failing to process settings after a restart for a new reason // (usually, this would be due to a code change) - writeJSONFileWithoutVersionIncrement(masterNode, testOtherErrorJSON, versionCounter, logger); + writeJSONFile(masterNode, testOtherErrorJSON, logger, versionCounter.get()); assertHasErrors(metadataVersion, "not_cluster_settings"); internalCluster().restartNode(masterNode); ensureGreen(); @@ -461,7 +472,7 @@ public void testSettingsAppliedOnMasterReElection() throws Exception { assertTrue(masterFileSettingsService.watching()); - writeJSONFile(masterNode, testJSON, versionCounter, logger); + writeJSONFile(masterNode, testJSON, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); internalCluster().stopCurrentMasterNode(); @@ -476,13 +487,13 @@ public void testSettingsAppliedOnMasterReElection() throws Exception { ensureStableCluster(3); savedClusterState = setupCleanupClusterStateListener(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), testCleanupJSON, versionCounter, logger); + writeJSONFile(internalCluster().getMasterName(), testCleanupJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); savedClusterState = setupClusterStateListener(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), testJSON43mb, versionCounter, logger); + writeJSONFile(internalCluster().getMasterName(), testJSON43mb, logger, versionCounter.incrementAndGet()); assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "43mb"); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index 54ba74a62890d..7b284979611e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.reservedstate.service; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -22,7 +23,6 @@ import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.test.ESIntegTestCase; @@ -30,9 +30,6 @@ import java.io.ByteArrayInputStream; import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -49,6 +46,7 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +@LuceneTestCase.SuppressFileSystems("*") public class RepositoriesFileSettingsIT extends ESIntegTestCase { private static AtomicLong versionCounter = new AtomicLong(1); @@ -102,15 +100,7 @@ private void assertMasterNode(Client client, String node) throws ExecutionExcept } private void writeJSONFile(String node, String json) throws Exception { - long version = versionCounter.incrementAndGet(); - - FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - Path tempFilePath = createTempFile(); - - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + FileSettingsServiceIT.writeJSONFile(node, json, logger, versionCounter.incrementAndGet()); } private Tuple setupClusterStateListener(String node) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java index 7e13402b7e66a..7d47ed391199c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java @@ -19,9 +19,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; @@ -29,11 +27,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.junit.After; -import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -78,34 +72,8 @@ public void cleanUp() throws Exception { awaitNoMoreRunningOperations(); } - private long retryDelay(int retryCount) { - return 100 * (1 << retryCount) + Randomness.get().nextInt(10); - } - private void writeJSONFile(String node, String json) throws Exception { - long version = versionCounter.incrementAndGet(); - - FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - Path tempFilePath = createTempFile(); - - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - int retryCount = 0; - do { - try { - // this can fail on Windows because of timing - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); - return; - } catch (IOException e) { - logger.info("--> retrying writing a settings file [" + retryCount + "]"); - if (retryCount == 4) { // retry 5 times - throw e; - } - Thread.sleep(retryDelay(retryCount)); - retryCount++; - } - } while (true); + FileSettingsServiceIT.writeJSONFile(node, json, logger, versionCounter.incrementAndGet()); } private Tuple setupClusterStateListener(String node) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index 9e36055e917a6..5be00ae3bfa0c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.integration; import org.apache.logging.log4j.Logger; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -20,6 +21,7 @@ import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; @@ -42,6 +44,7 @@ import org.junit.After; import java.io.ByteArrayInputStream; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -73,6 +76,7 @@ /** * Tests that file settings service can properly add role mappings. 
*/ +@LuceneTestCase.SuppressFileSystems("*") public class RoleMappingFileSettingsIT extends NativeRealmIntegTestCase { private static AtomicLong versionCounter = new AtomicLong(1); @@ -154,32 +158,37 @@ public void cleanUp() { updateClusterSettings(Settings.builder().putNull("indices.recovery.max_bytes_per_sec")); } - public static void writeJSONFile(String node, String json, Logger logger, AtomicLong versionCounter) throws Exception { - writeJSONFile(node, json, logger, versionCounter, true); - } - - public static void writeJSONFileWithoutVersionIncrement(String node, String json, Logger logger, AtomicLong versionCounter) - throws Exception { - writeJSONFile(node, json, logger, versionCounter, false); - } - - private static void writeJSONFile(String node, String json, Logger logger, AtomicLong versionCounter, boolean incrementVersion) - throws Exception { - long version = incrementVersion ? versionCounter.incrementAndGet() : versionCounter.get(); - + public static void writeJSONFile(String node, String json, Logger logger, Long version) throws Exception { FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - assertTrue(fileSettingsService.watching()); - - Files.deleteIfExists(fileSettingsService.watchedFile()); Files.createDirectories(fileSettingsService.watchedFileDir()); Path tempFilePath = createTempFile(); + String jsonWithVersion = Strings.format(json, version); logger.info("--> before writing JSON config to node {} with path {}", node, tempFilePath); - logger.info(Strings.format(json, version)); - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); - logger.info("--> after writing JSON config to node {} with path {}", node, tempFilePath); + logger.info(jsonWithVersion); + + Files.writeString(tempFilePath, jsonWithVersion); + int retryCount = 0; + do { + try { + // this can fail on Windows because of timing + Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + logger.info("--> after writing JSON config to node {} with path {}", node, tempFilePath); + return; + } catch (IOException e) { + logger.info("--> retrying writing a settings file [{}]", retryCount); + if (retryCount == 4) { // retry 5 times + throw e; + } + Thread.sleep(retryDelay(retryCount)); + retryCount++; + } + } while (true); + } + + private static long retryDelay(int retryCount) { + return 100 * (1 << retryCount) + Randomness.get().nextInt(10); } public static Tuple setupClusterStateListener(String node, String expectedKey) { @@ -320,7 +329,7 @@ public void testClusterStateRoleMappingsAddedThenDeleted() throws Exception { ensureGreen(); var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); - writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter.incrementAndGet()); assertRoleMappingsSaveOK(savedClusterState.v1(), savedClusterState.v2()); logger.info("---> cleanup cluster settings..."); @@ -333,7 +342,7 @@ public void testClusterStateRoleMappingsAddedThenDeleted() throws Exception { savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, 
versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -373,7 +382,7 @@ public void testGetRoleMappings() throws Exception { } var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); - writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -415,7 +424,8 @@ public void testGetRoleMappings() throws Exception { ); savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, emptyJSON, logger, versionCounter.incrementAndGet()); awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -465,7 +475,7 @@ public void testErrorSaved() throws Exception { // save an empty file to clear any prior state, this ensures we don't get a stale file left over by another test var savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -490,7 +500,8 @@ public void testErrorSaved() throws Exception { } ); - writeJSONFile(internalCluster().getMasterName(), testErrorJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, testErrorJSON, logger, versionCounter.incrementAndGet()); awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -515,7 +526,8 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { var closeIndexResponse = indicesAdmin().close(new CloseIndexRequest(INTERNAL_SECURITY_MAIN_INDEX_7)).get(); assertTrue(closeIndexResponse.isAcknowledged()); - writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, testJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -550,7 +562,8 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { } } finally { savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + String node = internalCluster().getMasterName(); + writeJSONFile(node, emptyJSON, logger, versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java index 15892c8d021f0..ef8f2cfc0d411 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.security; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.Tuple; +import org.elasticsearch.integration.RoleMappingFileSettingsIT; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecurityIntegTestCase; @@ -29,12 +31,11 @@ import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListener; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListenerForCleanup; -import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; -import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFileWithoutVersionIncrement; import static org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata.METADATA_NAME_FIELD; import static org.hamcrest.Matchers.containsInAnyOrder; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +@LuceneTestCase.SuppressFileSystems("*") public class FileSettingsRoleMappingsRestartIT extends SecurityIntegTestCase { private static final int MAX_WAIT_TIME_SECONDS = 20; @@ -116,7 +117,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { awaitFileSettingsWatcher(); logger.info("--> write some role mappings, no other file settings"); - writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter.incrementAndGet()); assertRoleMappingsInClusterStateWithAwait( savedClusterState, @@ -196,7 +197,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex Tuple savedClusterState = setupClusterStateListener(masterNode, "everyone_kibana_alone"); awaitFileSettingsWatcher(); logger.info("--> write some role mappings, no other file settings"); - writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, testJSONOnlyRoleMappings, logger, versionCounter.incrementAndGet()); assertRoleMappingsInClusterStateWithAwait( savedClusterState, @@ -226,7 +227,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex ); // write without version increment and assert that change gets applied on restart - writeJSONFileWithoutVersionIncrement(masterNode, testJSONOnlyUpdatedRoleMappings, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, testJSONOnlyUpdatedRoleMappings, logger, versionCounter.get()); logger.info("--> restart master"); internalCluster().restartNode(masterNode); ensureGreen(); @@ -288,7 +289,7 @@ private void cleanupClusterStateAndAssertNoMappings(String masterNode) throws Ex var savedClusterState = setupClusterStateListenerForCleanup(masterNode); awaitFileSettingsWatcher(); logger.info("--> remove the role mappings with an empty settings file"); - writeJSONFile(masterNode, emptyJSON, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, emptyJSON, logger, 
versionCounter.incrementAndGet()); boolean awaitSuccessful = savedClusterState.v1().await(MAX_WAIT_TIME_SECONDS, TimeUnit.SECONDS); assertTrue(awaitSuccessful); // ensure cluster-state update got propagated to expected version diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java index 63c510062bdad..e7f544399bdf0 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.integration.RoleMappingFileSettingsIT; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecurityIntegTestCase; @@ -40,7 +41,6 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListener; -import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_DATA_KEY; import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_KEY; import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; @@ -138,7 +138,7 @@ public void testMigrationSuccessful() throws Exception { // Setup listener to wait for role mapping var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); // Write role mappings - writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter.incrementAndGet()); assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); @@ -170,7 +170,7 @@ public void testMigrationSuccessfulNoOverlap() throws Exception { // Setup listener to wait for role mapping var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); // Write role mappings with fallback name, this should block any security migration - writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter.incrementAndGet()); assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); @@ -202,7 +202,7 @@ public void testMigrationSuccessfulNoNative() throws Exception { // Setup listener to wait for role mapping var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); // Write role mappings with fallback name, this should block any security migration - 
writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter.incrementAndGet()); assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); @@ -228,7 +228,7 @@ public void testMigrationFallbackNamePreCondition() throws Exception { // Setup listener to wait for role mapping var nameNotAvailableListener = setupClusterStateListener(masterNode, "name_not_available_after_deserialization"); // Write role mappings with fallback name, this should block any security migration - writeJSONFile(masterNode, TEST_JSON_WITH_FALLBACK_NAME, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_FALLBACK_NAME, logger, versionCounter.incrementAndGet()); assertTrue(nameNotAvailableListener.v1().await(20, TimeUnit.SECONDS)); // Create a native role mapping to create security index and trigger migration @@ -249,7 +249,7 @@ public void testMigrationFallbackNamePreCondition() throws Exception { assertThat(status, equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.NOT_READY)); // Write file without fallback name in it to unblock migration - writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter.incrementAndGet()); waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); } @@ -282,7 +282,7 @@ public void testSkipMigrationEmptyFileBasedMappings() throws Exception { // Setup listener to wait for any role mapping var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode); // Write role mappings - writeJSONFile(masterNode, TEST_JSON_WITH_EMPTY_ROLE_MAPPINGS, logger, versionCounter); + RoleMappingFileSettingsIT.writeJSONFile(masterNode, TEST_JSON_WITH_EMPTY_ROLE_MAPPINGS, logger, versionCounter.incrementAndGet()); assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); // Create a native role mapping to create security index and trigger migration (skipped initially) From 53f6080b605414e7962e5d9fd0aa8eaa80fc29b8 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 11 Nov 2024 07:40:36 -0600 Subject: [PATCH 21/95] Adding a deprecation info API warning for data streams with old indices (#116447) --- docs/changelog/116447.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../DataStreamDeprecationChecks.java | 74 +++++++++++ .../xpack/deprecation/DeprecationChecks.java | 6 + .../deprecation/DeprecationInfoAction.java | 39 +++++- .../TransportDeprecationInfoAction.java | 2 + .../DataStreamDeprecationChecksTests.java | 124 ++++++++++++++++++ .../DeprecationInfoActionResponseTests.java | 47 ++++++- 8 files changed, 294 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/116447.yaml create mode 100644 x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java create mode 100644 x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java diff --git a/docs/changelog/116447.yaml b/docs/changelog/116447.yaml new file mode 100644 index 0000000000000..8c0cea4b54578 --- /dev/null +++ b/docs/changelog/116447.yaml @@ -0,0 +1,5 @@ +pr: 116447 +summary: Adding a deprecation info API warning for 
data streams with old indices +area: Data streams +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 3134eb4966115..1c7e1eee5df65 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -190,6 +190,7 @@ static TransportVersion def(int id) { public static final TransportVersion LOGSDB_TELEMETRY_STATS = def(8_785_00_0); public static final TransportVersion KQL_QUERY_ADDED = def(8_786_00_0); public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); + public static final TransportVersion DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java new file mode 100644 index 0000000000000..ee029d01427aa --- /dev/null +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecks.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.util.List; + +import static java.util.Map.entry; +import static java.util.Map.ofEntries; + +public class DataStreamDeprecationChecks { + static DeprecationIssue oldIndicesCheck(DataStream dataStream, ClusterState clusterState) { + List backingIndices = dataStream.getIndices(); + boolean hasOldIndices = backingIndices.stream() + .anyMatch(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)); + if (hasOldIndices) { + long totalIndices = backingIndices.size(); + List oldIndices = backingIndices.stream() + .filter(index -> clusterState.metadata().index(index).getCompatibilityVersion().before(IndexVersions.V_8_0_0)) + .toList(); + long totalOldIndices = oldIndices.size(); + long totalOldSearchableSnapshots = oldIndices.stream() + .filter(index -> clusterState.metadata().index(index).isSearchableSnapshot()) + .count(); + long totalOldPartiallyMountedSearchableSnapshots = oldIndices.stream() + .filter(index -> clusterState.metadata().index(index).isPartialSearchableSnapshot()) + .count(); + long totalOldFullyMountedSearchableSnapshots = totalOldSearchableSnapshots - totalOldPartiallyMountedSearchableSnapshots; + return new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Old data stream with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", + "This data stream has backing indices that were created before Elasticsearch 8.0.0", + false, + ofEntries( + entry( + "backing_indices", + ofEntries( + entry("count", totalIndices), + entry( + "need_upgrading", + ofEntries( + entry("count", totalOldIndices), + entry( + "searchable_snapshots", + ofEntries( + entry("count", 
totalOldSearchableSnapshots), + entry("fully_mounted", ofEntries(entry("count", totalOldFullyMountedSearchableSnapshots))), + entry( + "partially_mounted", + ofEntries(entry("count", totalOldPartiallyMountedSearchableSnapshots)) + ) + ) + ) + ) + ) + ) + ) + ) + ); + } + return null; + } +} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 2f875cc1a3fa9..c80f26cda7b36 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -16,6 +17,7 @@ import java.util.List; import java.util.Objects; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -97,6 +99,10 @@ private DeprecationChecks() {} IndexDeprecationChecks::deprecatedCamelCasePattern ); + static List> DATA_STREAM_CHECKS = List.of( + DataStreamDeprecationChecks::oldIndicesCheck + ); + /** * helper utility function to reduce repeat of running a specific {@link List} of checks. * diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index cb9efd526fb29..cd26e23394e81 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -42,6 +43,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -144,10 +146,11 @@ private static Map> getMergedIssuesToNodesMap( } public static class Response extends ActionResponse implements ToXContentObject { - static final Set RESERVED_NAMES = Set.of("cluster_settings", "node_settings", "index_settings"); + static final Set RESERVED_NAMES = Set.of("cluster_settings", "node_settings", "index_settings", "data_streams"); private final List clusterSettingsIssues; private final List nodeSettingsIssues; private final Map> indexSettingsIssues; + private final Map> dataStreamIssues; private final Map> pluginSettingsIssues; public Response(StreamInput in) throws IOException { @@ -155,6 +158,11 @@ public Response(StreamInput in) throws IOException { clusterSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); nodeSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); indexSettingsIssues = 
in.readMapOfLists(DeprecationIssue::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK)) { + dataStreamIssues = in.readMapOfLists(DeprecationIssue::new); + } else { + dataStreamIssues = Map.of(); + } if (in.getTransportVersion().before(TransportVersions.V_7_11_0)) { List mlIssues = in.readCollectionAsList(DeprecationIssue::new); pluginSettingsIssues = new HashMap<>(); @@ -168,11 +176,13 @@ public Response( List clusterSettingsIssues, List nodeSettingsIssues, Map> indexSettingsIssues, + Map> dataStreamIssues, Map> pluginSettingsIssues ) { this.clusterSettingsIssues = clusterSettingsIssues; this.nodeSettingsIssues = nodeSettingsIssues; this.indexSettingsIssues = indexSettingsIssues; + this.dataStreamIssues = dataStreamIssues; Set intersection = Sets.intersection(RESERVED_NAMES, pluginSettingsIssues.keySet()); if (intersection.isEmpty() == false) { throw new ElasticsearchStatusException( @@ -205,6 +215,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(clusterSettingsIssues); out.writeCollection(nodeSettingsIssues); out.writeMap(indexSettingsIssues, StreamOutput::writeCollection); + if (out.getTransportVersion().onOrAfter(TransportVersions.DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK)) { + out.writeMap(dataStreamIssues, StreamOutput::writeCollection); + } if (out.getTransportVersion().before(TransportVersions.V_7_11_0)) { out.writeCollection(pluginSettingsIssues.getOrDefault("ml_settings", Collections.emptyList())); } else { @@ -219,6 +232,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .array("node_settings", nodeSettingsIssues.toArray()) .field("index_settings") .map(indexSettingsIssues) + .field("data_streams") + .map(dataStreamIssues) .mapContents(pluginSettingsIssues) .endObject(); } @@ -260,6 +275,7 @@ public static DeprecationInfoAction.Response from( Request request, NodesDeprecationCheckResponse nodeDeprecationResponse, List> indexSettingsChecks, + List> dataStreamChecks, List> clusterSettingsChecks, Map> pluginSettingIssues, List skipTheseDeprecatedSettings @@ -283,6 +299,19 @@ public static DeprecationInfoAction.Response from( } } + List dataStreamNames = indexNameExpressionResolver.dataStreamNames( + state, + IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN + ); + Map> dataStreamIssues = new HashMap<>(); + for (String dataStreamName : dataStreamNames) { + DataStream dataStream = stateWithSkippedSettingsRemoved.metadata().dataStreams().get(dataStreamName); + List issuesForSingleDataStream = filterChecks(dataStreamChecks, c -> c.apply(dataStream, state)); + if (issuesForSingleDataStream.isEmpty() == false) { + dataStreamIssues.put(dataStreamName, issuesForSingleDataStream); + } + } + // WORKAROUND: move transform deprecation issues into cluster_settings List transformDeprecations = pluginSettingIssues.remove( TransformDeprecationChecker.TRANSFORM_DEPRECATION_KEY @@ -291,7 +320,13 @@ public static DeprecationInfoAction.Response from( clusterSettingsIssues.addAll(transformDeprecations); } - return new DeprecationInfoAction.Response(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues, pluginSettingIssues); + return new DeprecationInfoAction.Response( + clusterSettingsIssues, + nodeSettingsIssues, + indexSettingsIssues, + dataStreamIssues, + pluginSettingIssues + ); } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java 
b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java index 91e77762870bf..683c29815399b 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/TransportDeprecationInfoAction.java @@ -36,6 +36,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS; +import static org.elasticsearch.xpack.deprecation.DeprecationChecks.DATA_STREAM_CHECKS; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; public class TransportDeprecationInfoAction extends TransportMasterNodeReadAction< @@ -134,6 +135,7 @@ protected final void masterOperation( request, response, INDEX_SETTINGS_CHECKS, + DATA_STREAM_CHECKS, CLUSTER_SETTINGS_CHECKS, deprecationIssues, skipTheseDeprecations diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java new file mode 100644 index 0000000000000..d5325fb0ff3a4 --- /dev/null +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DataStreamDeprecationChecksTests.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.deprecation; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamOptions; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.xpack.deprecation.DeprecationChecks.DATA_STREAM_CHECKS; +import static org.hamcrest.Matchers.equalTo; + +public class DataStreamDeprecationChecksTests extends ESTestCase { + + public void testOldIndicesCheck() { + long oldIndexCount = randomIntBetween(1, 100); + long newIndexCount = randomIntBetween(1, 100); + long oldSearchableSnapshotCount = 0; + long oldFullyManagedSearchableSnapshotCount = 0; + long oldPartiallyManagedSearchableSnapshotCount = 0; + List allIndices = new ArrayList<>(); + Map nameToIndexMetadata = new HashMap<>(); + for (int i = 0; i < oldIndexCount; i++) { + Settings.Builder settingsBuilder = settings(IndexVersion.fromId(7170099)); + if (randomBoolean()) { + settingsBuilder.put("index.store.type", "snapshot"); + if (randomBoolean()) { + oldFullyManagedSearchableSnapshotCount++; + } else { + settingsBuilder.put("index.store.snapshot.partial", true); + oldPartiallyManagedSearchableSnapshotCount++; + } + oldSearchableSnapshotCount++; + } + IndexMetadata 
oldIndexMetadata = IndexMetadata.builder("old-data-stream-index-" + i) + .settings(settingsBuilder) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + allIndices.add(oldIndexMetadata.getIndex()); + nameToIndexMetadata.put(oldIndexMetadata.getIndex().getName(), oldIndexMetadata); + } + for (int i = 0; i < newIndexCount; i++) { + Settings.Builder settingsBuilder = settings(IndexVersion.current()); + if (randomBoolean()) { + settingsBuilder.put("index.store.type", "snapshot"); + } + IndexMetadata newIndexMetadata = IndexMetadata.builder("new-data-stream-index-" + i) + .settings(settingsBuilder) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + allIndices.add(newIndexMetadata.getIndex()); + nameToIndexMetadata.put(newIndexMetadata.getIndex().getName(), newIndexMetadata); + } + DataStream dataStream = new DataStream( + randomAlphaOfLength(10), + allIndices, + randomNegativeLong(), + Map.of(), + randomBoolean(), + false, + false, + randomBoolean(), + randomFrom(IndexMode.values()), + null, + randomFrom(DataStreamOptions.EMPTY, DataStreamOptions.FAILURE_STORE_DISABLED, DataStreamOptions.FAILURE_STORE_ENABLED, null), + List.of(), + randomBoolean(), + null + ); + Metadata metadata = Metadata.builder().indices(nameToIndexMetadata).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + DeprecationIssue expected = new DeprecationIssue( + DeprecationIssue.Level.CRITICAL, + "Old data stream with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", + "This data stream has backing indices that were created before Elasticsearch 8.0.0", + false, + Map.of( + "backing_indices", + Map.of( + "count", + oldIndexCount + newIndexCount, + "need_upgrading", + Map.of( + "count", + oldIndexCount, + "searchable_snapshots", + Map.of( + "count", + oldSearchableSnapshotCount, + "fully_mounted", + Map.of("count", oldFullyManagedSearchableSnapshotCount), + "partially_mounted", + Map.of("count", oldPartiallyManagedSearchableSnapshotCount) + ) + ) + ) + ) + ); + List issues = DeprecationChecks.filterChecks(DATA_STREAM_CHECKS, c -> c.apply(dataStream, clusterState)); + assertThat(issues, equalTo(singletonList(expected))); + } +} diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java index 480ac2103fbfa..5750daa8e3673 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -36,7 +37,9 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -63,6 +66,13 @@ 
protected DeprecationInfoAction.Response createTestInstance() { .collect(Collectors.toList()); indexIssues.put(randomAlphaOfLength(10), perIndexIssues); } + Map> dataStreamIssues = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 10); i++) { + List perDataStreamIssues = Stream.generate(DeprecationInfoActionResponseTests::createTestDeprecationIssue) + .limit(randomIntBetween(0, 10)) + .collect(Collectors.toList()); + dataStreamIssues.put(randomAlphaOfLength(10), perDataStreamIssues); + } Map> pluginIssues = new HashMap<>(); for (int i = 0; i < randomIntBetween(0, 10); i++) { List perPluginIssues = Stream.generate(DeprecationInfoActionResponseTests::createTestDeprecationIssue) @@ -70,7 +80,7 @@ protected DeprecationInfoAction.Response createTestInstance() { .collect(Collectors.toList()); pluginIssues.put(randomAlphaOfLength(10), perPluginIssues); } - return new DeprecationInfoAction.Response(clusterIssues, nodeIssues, indexIssues, pluginIssues); + return new DeprecationInfoAction.Response(clusterIssues, nodeIssues, indexIssues, dataStreamIssues, pluginIssues); } @Override @@ -104,9 +114,13 @@ public void testFrom() throws IOException { boolean clusterIssueFound = randomBoolean(); boolean nodeIssueFound = randomBoolean(); boolean indexIssueFound = randomBoolean(); + boolean dataStreamIssueFound = randomBoolean(); DeprecationIssue foundIssue = createTestDeprecationIssue(); List> clusterSettingsChecks = List.of((s) -> clusterIssueFound ? foundIssue : null); List> indexSettingsChecks = List.of((idx) -> indexIssueFound ? foundIssue : null); + List> dataStreamChecks = List.of( + (ds, cs) -> dataStreamIssueFound ? foundIssue : null + ); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -125,6 +139,7 @@ public void testFrom() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), Collections.emptyList() @@ -197,6 +212,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { DeprecationIssue foundIssue2 = createTestDeprecationIssue(foundIssue1, metaMap2); List> clusterSettingsChecks = Collections.emptyList(); List> indexSettingsChecks = List.of((idx) -> null); + List> dataStreamChecks = List.of((ds, cs) -> null); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -214,6 +230,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), Collections.emptyList() @@ -239,8 +256,15 @@ public void testRemoveSkippedSettings() throws IOException { settingsBuilder.put("some.undeprecated.property", "someValue3"); settingsBuilder.putList("some.undeprecated.list.property", List.of("someValue4", "someValue5")); Settings inputSettings = settingsBuilder.build(); + IndexMetadata dataStreamIndexMetadata = IndexMetadata.builder("ds-test-index-1") + .settings(inputSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); Metadata metadata = Metadata.builder() .put(IndexMetadata.builder("test").settings(inputSettings).numberOfShards(1).numberOfReplicas(0)) + .put(dataStreamIndexMetadata, true) + .put(DataStream.builder("ds-test", List.of(dataStreamIndexMetadata.getIndex())).build()) .persistentSettings(inputSettings) .build(); @@ -256,6 +280,13 @@ public void testRemoveSkippedSettings() throws IOException { 
visibleIndexSettings.set(idx.getSettings()); return null; })); + AtomicInteger backingIndicesCount = new AtomicInteger(0); + List> dataStreamChecks = Collections.unmodifiableList( + Arrays.asList((ds, cs) -> { + backingIndicesCount.set(ds.getIndices().size()); + return null; + }) + ); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( new ClusterName(randomAlphaOfLength(5)), @@ -270,6 +301,7 @@ public void testRemoveSkippedSettings() throws IOException { request, nodeDeprecationIssues, indexSettingsChecks, + dataStreamChecks, clusterSettingsChecks, Collections.emptyMap(), List.of("some.deprecated.property", "some.other.*.deprecated.property") @@ -288,19 +320,30 @@ public void testRemoveSkippedSettings() throws IOException { Assert.assertTrue(resultIndexSettings.getAsList("some.undeprecated.list.property").equals(List.of("someValue4", "someValue5"))); Assert.assertFalse(resultIndexSettings.hasValue("some.deprecated.property")); Assert.assertFalse(resultIndexSettings.hasValue("some.other.bad.deprecated.property")); + + assertThat(backingIndicesCount.get(), equalTo(1)); } public void testCtorFailure() { Map> indexNames = Stream.generate(() -> randomAlphaOfLength(10)) .limit(10) .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); + Map> dataStreamNames = Stream.generate(() -> randomAlphaOfLength(10)) + .limit(10) + .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); Set shouldCauseFailure = new HashSet<>(RESERVED_NAMES); for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { Map> pluginSettingsIssues = randomSubsetOf(3, shouldCauseFailure).stream() .collect(Collectors.toMap(Function.identity(), (_k) -> Collections.emptyList())); expectThrows( ElasticsearchStatusException.class, - () -> new DeprecationInfoAction.Response(Collections.emptyList(), Collections.emptyList(), indexNames, pluginSettingsIssues) + () -> new DeprecationInfoAction.Response( + Collections.emptyList(), + Collections.emptyList(), + indexNames, + dataStreamNames, + pluginSettingsIssues + ) ); } } From a21d3753ccd7ce1d2a62f8511b091d482d360a2b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 11 Nov 2024 14:51:19 +0100 Subject: [PATCH 22/95] Unmute a lot of fixed tests from search race condition (#116587) These are all fixed by #116264 closes #115664 #113430 #115717 #115705 #115970 #115988 #115810 #116027 #115754 #116097 #115818 #116377 #114824 --- muted-tests.yml | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index a00fecc253a7b..38310b6650419 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -127,18 +127,9 @@ tests: - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testStalledShardMigrationProperlyDetected issue: https://github.com/elastic/elasticsearch/issues/115697 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoShapeGeoHash - issue: https://github.com/elastic/elasticsearch/issues/115664 - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testSupportedStream issue: https://github.com/elastic/elasticsearch/issues/113430 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoShapeGeoTile - issue: https://github.com/elastic/elasticsearch/issues/115717 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoShapeGeoHex - issue: https://github.com/elastic/elasticsearch/issues/115705 - class: 
org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Verify start transform reuses destination index} issue: https://github.com/elastic/elasticsearch/issues/115808 @@ -157,32 +148,14 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/115970 -- class: org.elasticsearch.search.slice.SearchSliceIT - method: testPointInTime - issue: https://github.com/elastic/elasticsearch/issues/115988 -- class: org.elasticsearch.action.search.PointInTimeIT - method: testPITTiebreak - issue: https://github.com/elastic/elasticsearch/issues/115810 - class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT method: testReindexWithShutdown issue: https://github.com/elastic/elasticsearch/issues/115996 - class: org.elasticsearch.search.query.SearchQueryIT method: testAllDocsQueryString issue: https://github.com/elastic/elasticsearch/issues/115728 -- class: org.elasticsearch.search.basic.SearchWithRandomExceptionsIT - method: testRandomExceptions - issue: https://github.com/elastic/elasticsearch/issues/116027 -- class: org.elasticsearch.action.admin.HotThreadsIT - method: testHotThreadsDontFail - issue: https://github.com/elastic/elasticsearch/issues/115754 -- class: org.elasticsearch.search.functionscore.QueryRescorerIT - method: testScoring - issue: https://github.com/elastic/elasticsearch/issues/116050 - class: org.elasticsearch.xpack.application.connector.ConnectorIndexServiceTests issue: https://github.com/elastic/elasticsearch/issues/116087 -- class: org.elasticsearch.xpack.searchbusinessrules.PinnedQueryBuilderIT - method: testPinnedPromotions - issue: https://github.com/elastic/elasticsearch/issues/116097 - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=cat.shards/10_basic/Help} issue: https://github.com/elastic/elasticsearch/issues/116110 @@ -195,9 +168,6 @@ tests: - class: org.elasticsearch.upgrades.FullClusterRestartIT method: testSnapshotRestore {cluster=OLD} issue: https://github.com/elastic/elasticsearch/issues/111777 -- class: org.elasticsearch.xpack.spatial.search.GeoGridAggAndQueryConsistencyIT - method: testGeoPointGeoTile - issue: https://github.com/elastic/elasticsearch/issues/115818 - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 @@ -261,9 +231,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {categorize.Categorize ASYNC} issue: https://github.com/elastic/elasticsearch/issues/116373 -- class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsIntegTests - method: testCreateAndRestoreSearchableSnapshot - issue: https://github.com/elastic/elasticsearch/issues/116377 - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 @@ -302,9 +269,6 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/116555 -- class: org.elasticsearch.search.basic.SearchWithRandomIOExceptionsIT - method: testRandomDirectoryIOExceptions - issue: https://github.com/elastic/elasticsearch/issues/114824 # Examples: # From e0aa1ade43857ba41309a84fbce22898da8c9167 Mon Sep 17 
00:00:00 2001 From: David Turner Date: Mon, 11 Nov 2024 14:19:03 +0000 Subject: [PATCH 23/95] Remove test mute for #110408 (#116475) This was fixed by #113630 but without unmuting the test. Closes #110408 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 38310b6650419..f49b303a2bc50 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -5,9 +5,6 @@ tests: - class: "org.elasticsearch.client.RestClientSingleHostIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/102717" method: "testRequestResetAndAbort" -- class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" - issue: "https://github.com/elastic/elasticsearch/issues/110408" - method: "testCreateAndRestorePartialSearchableSnapshot" - class: org.elasticsearch.xpack.restart.FullClusterRestartIT method: testSingleDoc {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111434 From 2cbc6576b6a17c4e46f44b84ec68d4c0019db224 Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Mon, 11 Nov 2024 15:58:50 +0100 Subject: [PATCH 24/95] Do not pass ThreadPool to DesiredBalanceComputer (#116590) Relates https://github.com/elastic/elasticsearch/pull/115511#discussion_r1814819721. `ThreadPool` is used here only to get time. (I've extracted this out of https://github.com/elastic/elasticsearch/pull/116333). --- .../allocator/DesiredBalanceComputer.java | 7 +-- .../DesiredBalanceShardsAllocator.java | 2 +- ...nsportDeleteDesiredBalanceActionTests.java | 2 +- .../DesiredBalanceComputerTests.java | 57 +++++++++---------- .../DesiredBalanceShardsAllocatorTests.java | 20 +++++-- 5 files changed, 44 insertions(+), 44 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 42240a996c531..682dc85ccd00f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -26,7 +26,6 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.HashMap; @@ -74,11 +73,7 @@ public class DesiredBalanceComputer { private TimeValue progressLogInterval; private long maxBalanceComputationTimeDuringIndexCreationMillis; - public DesiredBalanceComputer(ClusterSettings clusterSettings, ThreadPool threadPool, ShardsAllocator delegateAllocator) { - this(clusterSettings, delegateAllocator, threadPool::relativeTimeInMillis); - } - - DesiredBalanceComputer(ClusterSettings clusterSettings, ShardsAllocator delegateAllocator, LongSupplier timeSupplierMillis) { + public DesiredBalanceComputer(ClusterSettings clusterSettings, LongSupplier timeSupplierMillis, ShardsAllocator delegateAllocator) { this.delegateAllocator = delegateAllocator; this.timeSupplierMillis = timeSupplierMillis; clusterSettings.initializeAndWatch(PROGRESS_LOG_INTERVAL_SETTING, value -> this.progressLogInterval = value); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 0cfb3af87f012..5ccb59e29d7dc 
100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -91,7 +91,7 @@ public DesiredBalanceShardsAllocator( delegateAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), + new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator), reconciler, telemetryProvider ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index 17fab91d97cad..bb4aa9beeb42e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -101,7 +101,7 @@ public void testDeleteDesiredBalance() throws Exception { var clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings); var delegate = new BalancedShardsAllocator(); - var computer = new DesiredBalanceComputer(clusterSettings, threadPool, delegate) { + var computer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegate) { final AtomicReference lastComputationInput = new AtomicReference<>(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 56a687646b364..51401acabb0ac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -53,7 +53,6 @@ import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.test.MockLog; -import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.HashMap; @@ -85,8 +84,6 @@ import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DesiredBalanceComputerTests extends ESAllocationTestCase { @@ -1205,43 +1202,43 @@ public void testShouldLogComputationIteration() { } private void checkIterationLogging(int iterations, long eachIterationDuration, MockLog.AbstractEventExpectation expectation) { - - var mockThreadPool = mock(ThreadPool.class); var currentTime = new AtomicLong(0L); - when(mockThreadPool.relativeTimeInMillis()).thenAnswer(invocation -> currentTime.addAndGet(eachIterationDuration)); - // Some runs of this test try to simulate a long desired balance computation. Setting a high value on the following setting // prevents interrupting a long computation. 
var clusterSettings = createBuiltInClusterSettings( Settings.builder().put(DesiredBalanceComputer.MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING.getKey(), "2m").build() ); - var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, mockThreadPool, new ShardsAllocator() { - @Override - public void allocate(RoutingAllocation allocation) { - final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); - while (unassignedIterator.hasNext()) { - final var shardRouting = unassignedIterator.next(); - if (shardRouting.primary()) { - unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); - } else { - unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); + var desiredBalanceComputer = new DesiredBalanceComputer( + clusterSettings, + () -> currentTime.addAndGet(eachIterationDuration), + new ShardsAllocator() { + @Override + public void allocate(RoutingAllocation allocation) { + final var unassignedIterator = allocation.routingNodes().unassigned().iterator(); + while (unassignedIterator.hasNext()) { + final var shardRouting = unassignedIterator.next(); + if (shardRouting.primary()) { + unassignedIterator.initialize("node-0", null, 0L, allocation.changes()); + } else { + unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes()); + } } - } - // move shard on each iteration - for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); - } - for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { - allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); + // move shard on each iteration + for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes()); + } + for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) { + allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes()); + } } - } - @Override - public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { - throw new AssertionError("only used for allocation explain"); + @Override + public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) { + throw new AssertionError("only used for allocation explain"); + } } - }); + ); assertThatLogger(() -> { var iteration = new AtomicInteger(0); @@ -1349,7 +1346,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing } private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator allocator) { - return new DesiredBalanceComputer(createBuiltInClusterSettings(), mock(ThreadPool.class), allocator); + return new DesiredBalanceComputer(createBuiltInClusterSettings(), () -> 0L, allocator); } private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map expected) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 27c430131ff07..2cb3204787ce1 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -396,7 +396,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, shardsAllocator, time::get) { + new DesiredBalanceComputer(clusterSettings, time::get, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -522,7 +522,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -625,7 +625,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo shardsAllocator, threadPool, clusterService, - new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) { + new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) { @Override public DesiredBalance compute( DesiredBalance previousDesiredBalance, @@ -712,7 +712,7 @@ public void testResetDesiredBalance() { var delegateAllocator = createShardsAllocator(); var clusterSettings = createBuiltInClusterSettings(); - var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator) { + var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator) { final AtomicReference lastComputationInput = new AtomicReference<>(); @@ -780,7 +780,11 @@ public void testResetDesiredBalanceOnNoLongerMaster() { var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); + var desiredBalanceComputer = new DesiredBalanceComputer( + createBuiltInClusterSettings(), + threadPool::relativeTimeInMillis, + delegateAllocator + ); var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, @@ -829,7 +833,11 @@ public void testResetDesiredBalanceOnNodeShutdown() { final var resetCalled = new AtomicBoolean(); var delegateAllocator = createShardsAllocator(); - var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator); + var desiredBalanceComputer = new DesiredBalanceComputer( + createBuiltInClusterSettings(), + threadPool::relativeTimeInMillis, + delegateAllocator + ); var desiredBalanceAllocator = new DesiredBalanceShardsAllocator( delegateAllocator, threadPool, From 3ebc1f48aaed8bfaecbb86881503373f9bda2491 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 11 Nov 2024 15:59:11 +0100 Subject: [PATCH 25/95] Clarify docs around disk capacity expectation. (#115745) Make it explicit that Elasticsearch expects disks to have the same capacity across all the nodes in the same data tier.
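To make the hot-spotting risk concrete, here is a back-of-the-envelope sketch in plain Java (the class name, node labels, and all numbers are hypothetical, not taken from this change): shard-count-based balancing tends to put a similar number of shards on each node in a tier, so a node with a smaller disk fills up disproportionately fast.

    public class TierSkewSketch {
        public static void main(String[] args) {
            // Both hot-tier nodes hold 20 shards of 50 GiB each, which is roughly
            // what a shard-count-balanced allocator would place on them.
            long usedBytes = 20 * (50L << 30);
            long bigDiskBytes = 4096L << 30;   // 4 TiB node
            long smallDiskBytes = 1024L << 30; // 1 TiB node
            System.out.printf("large node: %.0f%% full%n", 100.0 * usedBytes / bigDiskBytes);   // ~24%
            System.out.printf("small node: %.0f%% full%n", 100.0 * usedBytes / smallDiskBytes); // ~98%
        }
    }

The smaller node sits near its disk watermarks while the larger one is mostly empty, so the tier can degrade even though its aggregate capacity looks fine; that is the scenario the reworded IMPORTANT block warns about.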
--- docs/reference/datatiers.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/reference/datatiers.asciidoc b/docs/reference/datatiers.asciidoc index c37f54b5c9cae..65e029d876e6f 100644 --- a/docs/reference/datatiers.asciidoc +++ b/docs/reference/datatiers.asciidoc @@ -37,9 +37,8 @@ TIP: The performance of an {es} node is often limited by the performance of the For example hardware profiles, refer to Elastic Cloud's {cloud}/ec-reference-hardware.html[instance configurations]. Review our recommendations for optimizing your storage for <> and <>. -IMPORTANT: {es} generally expects nodes within a data tier to share the same -hardware profile. Variations not following this recommendation should be -carefully architected to avoid <>. +IMPORTANT: {es} assumes nodes within a data tier share the same hardware profile (such as CPU, RAM, disk capacity). +Data tiers with unequally resourced nodes have a higher risk of <>. The way data tiers are used often depends on the data's category: From 0ce310ac293f2e8c18ee27ba2464fa88b2011925 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 11 Nov 2024 16:23:35 +0100 Subject: [PATCH 26/95] Adding full CompatibilityVersions to NodeInfo (#116577) Extracting the Transport protocol related changes from https://github.com/elastic/elasticsearch/pull/115771 to make backport easier. --- .../org/elasticsearch/TransportVersions.java | 2 ++ .../admin/cluster/node/info/NodeInfo.java | 35 ++++++++++++++----- .../elasticsearch/node/NodeConstruction.java | 3 +- .../org/elasticsearch/node/NodeService.java | 9 +++-- .../cluster/node/info/NodeInfoTests.java | 3 +- .../remote/RemoteClusterNodesActionTests.java | 5 +-- .../cluster/stats/ClusterStatsNodesTests.java | 3 +- .../ingest/ReservedPipelineActionTests.java | 3 +- .../TransportVersionsFixupListenerTests.java | 32 +++++++++++++---- .../nodesinfo/NodeInfoStreamingTests.java | 3 +- .../action/cat/RestPluginsActionTests.java | 3 +- .../AutoscalingNodesInfoServiceTests.java | 3 +- .../TransportNodeEnrollmentActionTests.java | 3 +- ...InternalEnrollmentTokenGeneratorTests.java | 5 +-- 14 files changed, 81 insertions(+), 31 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 1c7e1eee5df65..5f3b466f9f7bd 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -175,6 +175,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); + public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); @@ -191,6 +192,7 @@ static TransportVersion def(int id) { public static final TransportVersion KQL_QUERY_ADDED = def(8_786_00_0); public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); public static final TransportVersion 
DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0); + public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 71e3185329ed3..a7d92682b763c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -15,6 +15,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -22,6 +23,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.IngestInfo; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.monitor.os.OsInfo; @@ -42,7 +44,7 @@ public class NodeInfo extends BaseNodeResponse { private final String version; - private final TransportVersion transportVersion; + private final CompatibilityVersions compatibilityVersions; private final IndexVersion indexVersion; private final Map componentVersions; private final Build build; @@ -64,15 +66,20 @@ public NodeInfo(StreamInput in) throws IOException { super(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { version = in.readString(); - transportVersion = TransportVersion.readVersion(in); + if (in.getTransportVersion().isPatchFrom(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16) + || in.getTransportVersion().onOrAfter(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO)) { + compatibilityVersions = CompatibilityVersions.readVersion(in); + } else { + compatibilityVersions = new CompatibilityVersions(TransportVersion.readVersion(in), Map.of()); // unknown mappings versions + } indexVersion = IndexVersion.readVersion(in); } else { Version legacyVersion = Version.readVersion(in); version = legacyVersion.toString(); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - transportVersion = TransportVersion.readVersion(in); + compatibilityVersions = new CompatibilityVersions(TransportVersion.readVersion(in), Map.of()); // unknown mappings versions } else { - transportVersion = TransportVersion.fromId(legacyVersion.id); + compatibilityVersions = new CompatibilityVersions(TransportVersion.fromId(legacyVersion.id), Map.of()); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { indexVersion = IndexVersion.readVersion(in); @@ -114,7 +121,7 @@ public NodeInfo(StreamInput in) throws IOException { public NodeInfo( String version, - TransportVersion transportVersion, + CompatibilityVersions compatibilityVersions, IndexVersion indexVersion, Map componentVersions, Build build, @@ -134,7 +141,7 @@ public NodeInfo( ) { super(node); this.version = version; - this.transportVersion = transportVersion; + this.compatibilityVersions = compatibilityVersions; this.indexVersion = indexVersion; this.componentVersions = componentVersions; this.build = build; @@ -171,7 +178,7 @@ public 
String getVersion() { * The most recent transport version that can be used by this node */ public TransportVersion getTransportVersion() { - return transportVersion; + return compatibilityVersions.transportVersion(); } /** @@ -188,6 +195,13 @@ public Map getComponentVersions() { return componentVersions; } + /** + * A map of system index names to versions for their mappings supported by this node. + */ + public Map getCompatibilityVersions() { + return compatibilityVersions.systemIndexMappingsVersion(); + } + /** * The build version of the node. */ @@ -240,8 +254,11 @@ public void writeTo(StreamOutput out) throws IOException { } else { Version.writeVersion(Version.fromString(version), out); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - TransportVersion.writeVersion(transportVersion, out); + if (out.getTransportVersion().isPatchFrom(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16) + || out.getTransportVersion().onOrAfter(TransportVersions.ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO)) { + compatibilityVersions.writeTo(out); + } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + TransportVersion.writeVersion(compatibilityVersions.transportVersion(), out); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { IndexVersion.writeVersion(indexVersion, out); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 5354b1097326b..b424b417da82b 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1077,7 +1077,8 @@ private void construct( searchTransportService, indexingLimits, searchModule.getValuesSourceRegistry().getUsageService(), - repositoriesService + repositoriesService, + compatibilityVersions ); final TimeValue metricsInterval = settings.getAsTime("telemetry.agent.metrics_interval", TimeValue.timeValueSeconds(10)); diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index 9310849ba8111..7c71487ed68ca 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -10,7 +10,6 @@ package org.elasticsearch.node; import org.elasticsearch.Build; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -19,6 +18,7 @@ import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.unit.ByteSizeValue; @@ -65,6 +65,7 @@ public class NodeService implements Closeable { private final Coordinator coordinator; private final RepositoriesService repositoriesService; private final Map componentVersions; + private final CompatibilityVersions compatibilityVersions; NodeService( Settings settings, @@ -84,7 +85,8 @@ public class NodeService implements Closeable { SearchTransportService searchTransportService, IndexingPressure indexingPressure, AggregationUsageService 
aggregationUsageService, - RepositoriesService repositoriesService + RepositoriesService repositoriesService, + CompatibilityVersions compatibilityVersions ) { this.settings = settings; this.threadPool = threadPool; @@ -104,6 +106,7 @@ public class NodeService implements Closeable { this.aggregationUsageService = aggregationUsageService; this.repositoriesService = repositoriesService; this.componentVersions = findComponentVersions(pluginService); + this.compatibilityVersions = compatibilityVersions; clusterService.addStateApplier(ingestService); } @@ -124,7 +127,7 @@ public NodeInfo info( return new NodeInfo( // TODO: revert to Build.current().version() when Kibana is updated Version.CURRENT.toString(), - TransportVersion.current(), + compatibilityVersions, IndexVersion.current(), componentVersions, Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java index 5fa138abca809..9d01f411d35aa 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfoTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -40,7 +41,7 @@ public class NodeInfoTests extends ESTestCase { public void testGetInfo() { NodeInfo nodeInfo = new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java index 3eb0ff9fae674..6a9d6973a0047 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -78,7 +79,7 @@ public void testDoExecuteForRemoteServerNodes() { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, @@ -156,7 +157,7 @@ public void testDoExecuteForRemoteNodes() { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index 
44ceb94b392e5..627c57e07a1f3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodeStatsTests; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -327,7 +328,7 @@ private static NodeInfo createNodeInfo(String nodeId, String transportType, Stri } return new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java index 331f754d437a7..0bc5c69d8ad4b 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/ReservedPipelineActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -103,7 +104,7 @@ public void setup() { NodeInfo nodeInfo = new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index f9d3b7fcc920b..9eec8309bbb83 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.Maps; import org.elasticsearch.features.FeatureService; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.mockito.ArgumentCaptor; @@ -34,11 +35,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; +import static java.util.Map.entry; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.same; @@ -77,7 +81,7 @@ private static Map versions(T... 
versions) { return tvs; } - private static NodesInfoResponse getResponse(Map responseData) { + private static NodesInfoResponse getResponse(Map responseData) { return new NodesInfoResponse( ClusterName.DEFAULT, responseData.entrySet() @@ -207,10 +211,19 @@ public void testVersionsAreFixed() { argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), action.capture() ); - action.getValue().onResponse(getResponse(Map.of("node1", NEXT_TRANSPORT_VERSION, "node2", NEXT_TRANSPORT_VERSION))); + action.getValue() + .onResponse( + getResponse( + Map.ofEntries( + entry("node1", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())), + entry("node2", new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of())) + ) + ) + ); verify(taskQueue).submitTask(anyString(), task.capture(), any()); - assertThat(task.getValue().results(), equalTo(Map.of("node1", NEXT_TRANSPORT_VERSION, "node2", NEXT_TRANSPORT_VERSION))); + assertThat(task.getValue().results().keySet(), equalTo(Set.of("node1", "node2"))); + assertThat(task.getValue().results().values(), everyItem(equalTo(NEXT_TRANSPORT_VERSION))); } public void testConcurrentChangesDoNotOverlap() { @@ -259,12 +272,17 @@ public void testFailedRequestsAreRetried() { Scheduler scheduler = mock(Scheduler.class); Executor executor = mock(Executor.class); + var compatibilityVersions = new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index-1", new SystemIndexDescriptor.MappingsVersion(1, 1234)) + ); ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) - .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) + .nodes(node(Version.CURRENT, Version.CURRENT, Version.CURRENT)) .nodeIdsToCompatibilityVersions( - Maps.transformValues( - versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), - transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) + Map.ofEntries( + entry("node0", compatibilityVersions), + entry("node1", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())), + entry("node2", new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of())) ) ) .build(); diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index fd839999edf21..33801dfb98417 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -241,7 +242,7 @@ private static NodeInfo createNodeInfo() { } return new NodeInfo( randomAlphaOfLengthBetween(6, 32), - TransportVersionUtils.randomVersion(random()), + new CompatibilityVersions(TransportVersionUtils.randomVersion(random()), Map.of()), IndexVersionUtils.randomVersion(random()), componentVersions, build, diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java index 766fefbeddb0f..0994f9bf2303c 100644 --- 
a/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestPluginsActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Table; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.PluginDescriptor; @@ -66,7 +67,7 @@ private Table buildTable(List pluginDescriptor) { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 9658db911f6df..85cd415102124 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -452,7 +453,7 @@ private static org.elasticsearch.action.admin.cluster.node.info.NodeInfo infoFor OsInfo osInfo = new OsInfo(randomLong(), processors, Processors.of((double) processors), null, null, null, null); return new org.elasticsearch.action.admin.cluster.node.info.NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), Build.current(), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java index a4d94f9762e69..c85684a60e449 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/enrollment/TransportNodeEnrollmentActionTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslConfiguration; @@ -103,7 +104,7 @@ public void testDoExecute() throws Exception { nodeInfos.add( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index dd6c41b0a10eb..383d4e4c9fe9f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.BackoffPolicy; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; @@ -236,7 +237,7 @@ public Answer answerNullHttpInfo(InvocationOnMock invocationO List.of( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, @@ -271,7 +272,7 @@ private Answer answerWithInfo(InvocationOnMock invocationOnMo List.of( new NodeInfo( Build.current().version(), - TransportVersion.current(), + new CompatibilityVersions(TransportVersion.current(), Map.of()), IndexVersion.current(), Map.of(), null, From 19291cf37cf9540ce83fdc1a5edc8acc785bcb11 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Mon, 11 Nov 2024 10:30:49 -0500 Subject: [PATCH 27/95] [ESQL] test date nanos union type (#116265) Resolves #112885 This PR adds a set of basic tests for using TO_DATE_NANOS as a union type. It also tests the TO_DATETIME union type for casting date nanos. There are still some cases that aren't covered here, mostly because we haven't finished adding date nanos support to all the relevant functions. I expect we'll add those cases as date nanos support lands in the remaining functions.
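Some context for the fixture values used below: a `date_nanos` field is represented as a single long counting nanoseconds since the epoch, while `datetime` counts milliseconds. A minimal standalone sketch of that conversion follows (illustrative only; the class and method names are made up, and the patch itself switches CsvTestUtils to the shared `DateUtils.toLong` helper, presumably to get its range handling rather than inlining this arithmetic):

    import java.time.Instant;

    public class EpochNanosSketch {
        // The *Exact variants surface the overflow that plain multiplication would
        // silently produce for instants beyond roughly the year 2262.
        static long toEpochNanos(Instant instant) {
            return Math.addExact(Math.multiplyExact(instant.getEpochSecond(), 1_000_000_000L), instant.getNano());
        }

        public static void main(String[] args) {
            // First row of the new sample_data_ts_nanos.csv fixture.
            Instant ts = Instant.parse("2023-10-23T13:55:01.543123456Z");
            System.out.println(toEpochNanos(ts)); // prints 1698069301543123456
        }
    }

This is also why the expected outputs in the spec changes below differ only in their fractional seconds: the millis-typed index renders `.543000000Z` after casting to date nanos, while the nanos-typed index keeps the full `.543123456Z` precision.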
--------- Co-authored-by: Elastic Machine --- .../xpack/esql/CsvTestUtils.java | 3 +- .../xpack/esql/CsvTestsDataLoader.java | 4 + .../main/resources/sample_data_ts_nanos.csv | 8 + .../src/main/resources/union_types.csv-spec | 471 +++++++++++++----- 4 files changed, 347 insertions(+), 139 deletions(-) create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index bd8bd0f688837..7adafa908ce4f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -471,7 +472,7 @@ public enum Type { return null; } Instant parsed = DateFormatters.from(ISO_DATE_WITH_NANOS.parse(x)).toInstant(); - return parsed.getEpochSecond() * 1_000_000_000 + parsed.getNano(); + return DateUtils.toLong(parsed); }, (l, r) -> l instanceof Long maybeIP ? maybeIP.compareTo((Long) r) : l.toString().compareTo(r.toString()), Long.class), BOOLEAN(Booleans::parseBoolean, Boolean.class), GEO_POINT(x -> x == null ? null : GEO.wktToWkb(x), BytesRef.class), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 2bd7ecc37b034..478c68db68aa7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -64,6 +64,9 @@ public class CsvTestsDataLoader { private static final TestsDataset SAMPLE_DATA_TS_LONG = SAMPLE_DATA.withIndex("sample_data_ts_long") .withData("sample_data_ts_long.csv") .withTypeMapping(Map.of("@timestamp", "long")); + private static final TestsDataset SAMPLE_DATA_TS_NANOS = SAMPLE_DATA.withIndex("sample_data_ts_nanos") + .withData("sample_data_ts_nanos.csv") + .withTypeMapping(Map.of("@timestamp", "date_nanos")); private static final TestsDataset MISSING_IP_SAMPLE_DATA = new TestsDataset("missing_ip_sample_data"); private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips"); private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr"); @@ -101,6 +104,7 @@ public class CsvTestsDataLoader { Map.entry(ALERTS.indexName, ALERTS), Map.entry(SAMPLE_DATA_STR.indexName, SAMPLE_DATA_STR), Map.entry(SAMPLE_DATA_TS_LONG.indexName, SAMPLE_DATA_TS_LONG), + Map.entry(SAMPLE_DATA_TS_NANOS.indexName, SAMPLE_DATA_TS_NANOS), Map.entry(MISSING_IP_SAMPLE_DATA.indexName, MISSING_IP_SAMPLE_DATA), Map.entry(CLIENT_IPS.indexName, CLIENT_IPS), Map.entry(CLIENT_CIDR.indexName, CLIENT_CIDR), diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv new file mode 100644 index 0000000000000..eb947f27cc1ee 
--- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/sample_data_ts_nanos.csv @@ -0,0 +1,8 @@ +@timestamp:date_nanos,client_ip:ip,event_duration:long,message:keyword +2023-10-23T13:55:01.543123456Z,172.21.3.15,1756467,Connected to 10.1.0.1 +2023-10-23T13:53:55.832123456Z,172.21.3.15,5033755,Connection error +2023-10-23T13:52:55.015123456Z,172.21.3.15,8268153,Connection error +2023-10-23T13:51:54.732123456Z,172.21.3.15,725448,Connection error +2023-10-23T13:33:34.937123456Z,172.21.0.5,1232382,Disconnected +2023-10-23T12:27:28.948123456Z,172.21.2.113,2764889,Connected to 10.1.0.2 +2023-10-23T12:15:03.360123456Z,172.21.2.162,3450233,Connected to 10.1.0.3 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec index a51e4fe995fb3..a2fd3f3d5e0da 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/union_types.csv-spec @@ -44,6 +44,44 @@ FROM sample_data_ts_long 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; +singleIndexTsNanosAsMillis +required_capability: to_date_nanos + +FROM sample_data_ts_nanos +| EVAL @timestamp = TO_DATETIME(@timestamp) +| KEEP @timestamp, client_ip, event_duration, message +| SORT @timestamp DESC +; + +@timestamp:date | client_ip:ip | event_duration:long | message:keyword +2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +singleIndexTsMillisAsNanos +required_capability: to_date_nanos + +FROM sample_data +| EVAL @timestamp = TO_DATE_NANOS(@timestamp) +| KEEP @timestamp, client_ip, event_duration, message +| SORT @timestamp DESC +; + +@timestamp:date_nanos | client_ip:ip | event_duration:long | message:keyword +2023-10-23T13:55:01.543000000Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +2023-10-23T13:53:55.832000000Z | 172.21.3.15 | 5033755 | Connection error +2023-10-23T13:52:55.015000000Z | 172.21.3.15 | 8268153 | Connection error +2023-10-23T13:51:54.732000000Z | 172.21.3.15 | 725448 | Connection error +2023-10-23T13:33:34.937000000Z | 172.21.0.5 | 1232382 | Disconnected +2023-10-23T12:27:28.948000000Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +2023-10-23T12:15:03.360000000Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + singleIndexIpStats required_capability: casting_operator @@ -529,6 +567,92 @@ sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; + +multiIndexTsNanosRename +required_capability: to_date_nanos +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATETIME(@timestamp) +| KEEP _index, ts, client_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date | client_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 
1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +multiIndexTsNanosRenameToNanos +required_capability: to_date_nanos +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATE_NANOS(@timestamp) +| KEEP _index, ts, client_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date_nanos | client_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543000000Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832000000Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015000000Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732000000Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937000000Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948000000Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360000000Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543123456Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832123456Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015123456Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732123456Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937123456Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948123456Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360123456Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +; + +multiIndexTsNanosRenameToNanosWithFiltering +required_capability: to_date_nanos +required_capability: date_nanos_binary_comparison +required_capability: union_types +required_capability: metadata_fields +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos METADATA _index +| EVAL ts = TO_DATE_NANOS(@timestamp) +| WHERE ts > TO_DATE_NANOS("2023-10-23T13:00:00Z") +| KEEP _index, ts, client_ip, event_duration, message +| SORT _index ASC, ts DESC +; + +_index:keyword | ts:date_nanos | client_ip:ip | event_duration:long | 
message:keyword +sample_data | 2023-10-23T13:55:01.543000000Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832000000Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015000000Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732000000Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937000000Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T13:55:01.543123456Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832123456Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015123456Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732123456Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937123456Z | 172.21.0.5 | 1232382 | Disconnected +; + multiIndexTsLongRenameToString required_capability: union_types required_capability: metadata_fields @@ -591,24 +715,57 @@ count:long | @timestamp:date 4 | 2023-10-23T12:00:00.000Z ; +multiIndexTsNanosToDatetimeStats +required_capability: union_types +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos +| EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATETIME(@timestamp)) +| STATS count=count(*) BY @timestamp +| SORT count DESC, @timestamp ASC +| KEEP count, @timestamp +; + +count:long | @timestamp:date +10 | 2023-10-23T13:00:00.000Z +4 | 2023-10-23T12:00:00.000Z +; + +Multi Index millis to nanos stats +required_capability: union_types +required_capability: union_types_remove_fields + +FROM sample_data, sample_data_ts_nanos +| EVAL @timestamp = DATE_TRUNC(1 hour, TO_DATE_NANOS(@timestamp)) +| STATS count=count(*) BY @timestamp +| SORT count DESC, @timestamp ASC +| KEEP count, @timestamp +; + +count:long | @timestamp:date_nanos +10 | 2023-10-23T13:00:00.000Z +4 | 2023-10-23T12:00:00.000Z +; + + multiIndexTsLongStatsDrop required_capability: union_types required_capability: union_types_agg_cast required_capability: casting_operator -FROM sample_data, sample_data_ts_long +FROM sample_data, sample_data_ts_long, sample_data_ts_nanos | STATS count=count(*) BY @timestamp::datetime | KEEP count ; count:long -2 -2 -2 -2 -2 -2 -2 +3 +3 +3 +3 +3 +3 +3 ; multiIndexTsLongStatsInline2 @@ -616,19 +773,19 @@ required_capability: union_types required_capability: union_types_agg_cast required_capability: casting_operator -FROM sample_data, sample_data_ts_long +FROM sample_data, sample_data_ts_long, sample_data_ts_nanos | STATS count=count(*) BY @timestamp::datetime | SORT count DESC, `@timestamp::datetime` DESC ; count:long | @timestamp::datetime:datetime -2 | 2023-10-23T13:55:01.543Z -2 | 2023-10-23T13:53:55.832Z -2 | 2023-10-23T13:52:55.015Z -2 | 2023-10-23T13:51:54.732Z -2 | 2023-10-23T13:33:34.937Z -2 | 2023-10-23T12:27:28.948Z -2 | 2023-10-23T12:15:03.360Z +3 | 2023-10-23T13:55:01.543Z +3 | 2023-10-23T13:53:55.832Z +3 | 2023-10-23T13:52:55.015Z +3 | 2023-10-23T13:51:54.732Z +3 | 2023-10-23T13:33:34.937Z +3 | 2023-10-23T12:27:28.948Z +3 | 2023-10-23T12:15:03.360Z ; multiIndexTsLongStatsInline3 @@ -765,28 +922,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, @timestamp DESC ; -_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword -sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection 
error -sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +_index:keyword | @timestamp:date | client_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 
+sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; multiIndexIpStringTsLongDropped @@ -799,28 +963,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, event_duration ASC ; -_index:keyword | event_duration:long | message:keyword -sample_data | 725448 | Connection error -sample_data | 1232382 | Disconnected -sample_data | 1756467 | Connected to 10.1.0.1 -sample_data | 2764889 | Connected to 10.1.0.2 -sample_data | 3450233 | Connected to 10.1.0.3 -sample_data | 5033755 | Connection error -sample_data | 8268153 | Connection error -sample_data_str | 725448 | Connection error -sample_data_str | 1232382 | Disconnected -sample_data_str | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2764889 | Connected to 10.1.0.2 -sample_data_str | 3450233 | Connected to 10.1.0.3 -sample_data_str | 5033755 | Connection error -sample_data_str | 8268153 | Connection error -sample_data_ts_long | 725448 | Connection error -sample_data_ts_long | 1232382 | Disconnected -sample_data_ts_long | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 5033755 | Connection error -sample_data_ts_long | 8268153 | Connection error +_index:keyword | event_duration:long | message:keyword +sample_data | 725448 | Connection error +sample_data | 1232382 | Disconnected +sample_data | 1756467 | Connected to 10.1.0.1 +sample_data | 2764889 | Connected to 10.1.0.2 +sample_data | 3450233 | Connected to 10.1.0.3 +sample_data | 5033755 | Connection error +sample_data | 8268153 | Connection error +sample_data_str | 725448 | Connection error +sample_data_str | 1232382 | Disconnected +sample_data_str | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2764889 | Connected to 10.1.0.2 +sample_data_str | 3450233 | Connected to 10.1.0.3 +sample_data_str | 5033755 | Connection error +sample_data_str | 8268153 | Connection error +sample_data_ts_long | 725448 | Connection error +sample_data_ts_long | 1232382 | Disconnected +sample_data_ts_long | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 5033755 | Connection error +sample_data_ts_long | 8268153 | Connection error +sample_data_ts_nanos | 725448 | Connection error +sample_data_ts_nanos | 1232382 | Disconnected +sample_data_ts_nanos | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 5033755 | Connection error +sample_data_ts_nanos | 8268153 | Connection error ; multiIndexIpStringTsLongRename @@ -834,28 +1005,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -_index:keyword | ts:date | host_ip:ip | event_duration:long | message:keyword 
-sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +_index:keyword | ts:date | host_ip:ip | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 
2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; multiIndexIpStringTsLongRenameDropped @@ -868,28 +1046,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, event_duration ASC ; -_index:keyword | event_duration:long | message:keyword -sample_data | 725448 | Connection error -sample_data | 1232382 | Disconnected -sample_data | 1756467 | Connected to 10.1.0.1 -sample_data | 2764889 | Connected to 10.1.0.2 -sample_data | 3450233 | Connected to 10.1.0.3 -sample_data | 5033755 | Connection error -sample_data | 8268153 | Connection error -sample_data_str | 725448 | Connection error -sample_data_str | 1232382 | Disconnected -sample_data_str | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2764889 | Connected to 10.1.0.2 -sample_data_str | 3450233 | Connected to 10.1.0.3 -sample_data_str | 5033755 | Connection error -sample_data_str | 8268153 | Connection error -sample_data_ts_long | 725448 | Connection error -sample_data_ts_long | 1232382 | Disconnected -sample_data_ts_long | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 5033755 | Connection error -sample_data_ts_long | 8268153 | Connection error +_index:keyword | event_duration:long | message:keyword +sample_data | 725448 | Connection error +sample_data | 1232382 | Disconnected +sample_data | 1756467 | Connected to 10.1.0.1 +sample_data | 2764889 | Connected to 10.1.0.2 +sample_data | 3450233 | Connected to 10.1.0.3 +sample_data | 5033755 | Connection error +sample_data | 8268153 | Connection error +sample_data_str | 725448 | Connection error +sample_data_str | 1232382 | Disconnected +sample_data_str | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2764889 | Connected to 10.1.0.2 +sample_data_str | 3450233 | Connected to 10.1.0.3 +sample_data_str | 5033755 | Connection error +sample_data_str | 8268153 | Connection error +sample_data_ts_long | 725448 | Connection error +sample_data_ts_long | 1232382 | Disconnected +sample_data_ts_long | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 5033755 | Connection error +sample_data_ts_long | 8268153 | Connection error +sample_data_ts_nanos | 725448 | Connection error +sample_data_ts_nanos | 1232382 | Disconnected +sample_data_ts_nanos | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 5033755 | Connection error +sample_data_ts_nanos | 8268153 | Connection error ; 
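The union-type specs in this file all pivot on plain widening and truncation between epoch-millis and epoch-nanos. A minimal sketch of the widening step, using only the JDK; the instant is taken from the tables above, and Elasticsearch's `DateUtils.toLong(Instant)` (which the updated `CsvTestUtils` now calls instead of the hand-rolled expression) computes the same value with range checks:

```java
import java.time.Instant;

// Illustrative sketch, not project code: shows why 2023-10-23T13:52:55.015Z
// renders as ...55.015000000Z once widened to date_nanos, and why the long
// columns later in this file hold 1698069175015 (millis) next to
// 1698069175015123456 (nanos).
public class MillisNanosSketch {
    public static void main(String[] args) {
        Instant t = Instant.parse("2023-10-23T13:52:55.015Z");
        long millis = t.toEpochMilli();                                 // 1698069175015
        long nanos = t.getEpochSecond() * 1_000_000_000L + t.getNano(); // 1698069175015000000
        // DateUtils.toLong(Instant) yields the same long but throws instead of
        // silently overflowing for instants outside the date_nanos range
        // (before 1970-01-01 or after roughly 2262-04-11).
        System.out.println(millis + " -> " + nanos);
    }
}
```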
multiIndexIpStringTsLongRenameToString @@ -903,28 +1088,35 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -_index:keyword | ts:keyword | host_ip:keyword | event_duration:long | message:keyword -sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 -sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 -sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error -sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error -sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected -sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 -sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +_index:keyword | ts:keyword | host_ip:keyword | event_duration:long | message:keyword +sample_data | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_str | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_str | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_str | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_str | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_str | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_str | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_long | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_long | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection 
error +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_long | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_long | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_long | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 +sample_data_ts_nanos | 2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 +sample_data_ts_nanos | 2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error +sample_data_ts_nanos | 2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error +sample_data_ts_nanos | 2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected +sample_data_ts_nanos | 2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 +sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 ; multiIndexWhereIpStringTsLong @@ -1002,10 +1194,11 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -@timestamp:null | client_ip:null | event_duration:long | message:keyword | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k -null | null | 8268153 | Connection error | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -null | null | 8268153 | Connection error | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -null | null | 8268153 | Connection error | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +@timestamp:null | client_ip:null | event_duration:long | message:keyword | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k +null | null | 8268153 | Connection error | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +null | null | 8268153 | Connection error | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +null | null | 8268153 | Connection error | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +null | null | 8268153 | Connection error | sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015123456Z | 1698069175015123456 | 172.21.3.15 | 172.21.3.15 ; multiIndexMultiColumnTypesRenameAndKeep @@ -1020,10 +1213,11 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -_index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k | event_duration:long -sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 -sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 -sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +_index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k | event_duration:long +sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 
172.21.3.15 | 8268153 +sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 | 8268153 +sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015123456Z | 1698069175015123456 | 172.21.3.15 | 172.21.3.15 | 8268153 ; multiIndexMultiColumnTypesRenameAndDrop @@ -1038,10 +1232,11 @@ FROM sample_data* METADATA _index | SORT _index ASC, ts DESC ; -event_duration:long | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k -8268153 | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -8268153 | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 -8268153 | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +event_duration:long | _index:keyword | ts:date | ts_str:keyword | ts_l:long | ip:ip | ip_str:k +8268153 | sample_data | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_str | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015Z | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_ts_long | 2023-10-23T13:52:55.015Z | 1698069175015 | 1698069175015 | 172.21.3.15 | 172.21.3.15 +8268153 | sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015123456Z | 1698069175015123456 | 172.21.3.15 | 172.21.3.15 ; From 20543579024175b76a621a67dfb4e8924d240263 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 11 Nov 2024 18:04:09 +0200 Subject: [PATCH 28/95] Refactor DocumentDimensions to RoutingFields (#116321) * Refactor DocumentDimensions to RoutingFields * update * add test * add test * updates from review * updates from review * spotless * remove final from subclass * fix final --- .../bucket/timeseries/InternalTimeSeries.java | 4 +- .../timeseries/TimeSeriesAggregator.java | 9 +- .../timeseries/InternalTimeSeriesTests.java | 7 +- .../timeseries/TimeSeriesAggregatorTests.java | 12 +- .../org/elasticsearch/index/IndexMode.java | 21 +- .../index/mapper/BooleanFieldMapper.java | 2 +- .../index/mapper/DocumentDimensions.java | 92 ----- .../index/mapper/DocumentParserContext.java | 14 +- .../index/mapper/IpFieldMapper.java | 2 +- .../index/mapper/KeywordFieldMapper.java | 2 +- .../index/mapper/NumberFieldMapper.java | 2 +- .../index/mapper/RoutingFields.java | 85 +++++ .../index/mapper/RoutingPathFields.java | 269 +++++++++++++++ .../index/mapper/TimeSeriesIdFieldMapper.java | 323 ++---------------- .../flattened/FlattenedFieldParser.java | 5 +- .../elasticsearch/search/DocValueFormat.java | 16 +- .../index/mapper/IdLoaderTests.java | 36 +- .../index/mapper/RoutingPathFieldsTests.java | 91 +++++ .../search/DocValueFormatTests.java | 12 +- .../rate/TimeSeriesRateAggregatorTests.java | 8 +- .../TimeSeriesSortedSourceOperatorTests.java | 11 +- .../unsignedlong/UnsignedLongFieldMapper.java | 2 +- .../aggregations/GeoLineAggregatorTests.java | 7 +- 23 files changed, 557 insertions(+), 475 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java diff --git 
a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index c283f9fd93957..c4cdacd135cb4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -68,7 +68,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public Map getKey() { - return TimeSeriesIdFieldMapper.decodeTsidAsMap(key); + return RoutingPathFields.decodeAsMap(key); } @Override diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index a2fa617ed902b..c74637330dd7a 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.aggregations.AggregationExecutionContext; import org.elasticsearch.search.aggregations.Aggregator; @@ -161,11 +162,11 @@ public void collect(int doc, long bucket) throws IOException { if (currentTsidOrd == aggCtx.getTsidHashOrd()) { tsid = currentTsid; } else { - TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (TsidConsumer consumer : dimensionConsumers.values()) { - consumer.accept(doc, tsidBuilder); + consumer.accept(doc, routingPathFields); } - currentTsid = tsid = tsidBuilder.buildLegacyTsid().toBytesRef(); + currentTsid = tsid = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); } long bucketOrdinal = bucketOrds.add(bucket, tsid); if (bucketOrdinal < 0) { // already seen @@ -189,6 +190,6 @@ InternalTimeSeries buildResult(InternalTimeSeries.InternalBucket[] topBuckets) { @FunctionalInterface interface TsidConsumer { - void accept(int docId, TimeSeriesIdFieldMapper.TimeSeriesIdBuilder tsidBuilder) throws IOException; + void accept(int docId, RoutingPathFields routingFields) throws IOException; } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java index be841da07ada9..e61c02e0b9cd2 100644 --- 
a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.AggregationReduceContext; @@ -42,12 +43,12 @@ private List randomBuckets(boolean keyed, InternalAggregations a List> keys = randomKeys(bucketKeys(randomIntBetween(1, 4)), numberOfBuckets); for (int j = 0; j < numberOfBuckets; j++) { long docCount = randomLongBetween(0, Long.MAX_VALUE / (20L * numberOfBuckets)); - var builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingPathFields = new RoutingPathFields(null); for (var entry : keys.get(j).entrySet()) { - builder.addString(entry.getKey(), (String) entry.getValue()); + routingPathFields.addString(entry.getKey(), (String) entry.getValue()); } try { - var key = builder.buildLegacyTsid().toBytesRef(); + var key = TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); bucketList.add(new InternalBucket(key, docCount, aggregations, keyed)); } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java index 26611127a94df..d9a4023457126 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -93,10 +93,10 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - final TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (int i = 0; i < dimensions.length; i += 2) { if (dimensions[i + 1] instanceof Number n) { - builder.addLong(dimensions[i].toString(), n.longValue()); + routingPathFields.addLong(dimensions[i].toString(), n.longValue()); if (dimensions[i + 1] instanceof Integer || dimensions[i + 1] instanceof Long) { fields.add(new NumericDocValuesField(dimensions[i].toString(), 
((Number) dimensions[i + 1]).longValue())); } else if (dimensions[i + 1] instanceof Float) { @@ -105,7 +105,7 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens fields.add(new DoubleDocValuesField(dimensions[i].toString(), (double) dimensions[i + 1])); } } else { - builder.addString(dimensions[i].toString(), dimensions[i + 1].toString()); + routingPathFields.addString(dimensions[i].toString(), dimensions[i + 1].toString()); fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); } } @@ -118,7 +118,9 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens fields.add(new DoubleDocValuesField(metrics[i].toString(), (double) metrics[i + 1])); } } - fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + fields.add( + new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef()) + ); iw.addDocument(fields); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index e6339344b6e5f..f5f923f3657f8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -23,7 +23,6 @@ import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.DocumentDimensions; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -33,6 +32,8 @@ import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.RoutingFields; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; @@ -111,8 +112,8 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -209,9 +210,9 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { + public RoutingFields buildRoutingFields(IndexSettings settings) { IndexRouting.ExtractFromSource routing = (IndexRouting.ExtractFromSource) settings.getIndexRouting(); - return new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(routing.builder()); + return new RoutingPathFields(routing.builder()); } @Override @@ -287,8 +288,8 @@ public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { } @Override - public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -368,8 +369,8 @@ public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { } @Override 
- public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { - return DocumentDimensions.Noop.INSTANCE; + public RoutingFields buildRoutingFields(IndexSettings settings) { + return RoutingFields.Noop.INSTANCE; } @Override @@ -524,7 +525,7 @@ public String getName() { /** * How {@code time_series_dimension} fields are handled by indices in this mode. */ - public abstract DocumentDimensions buildDocumentDimensions(IndexSettings settings); + public abstract RoutingFields buildRoutingFields(IndexSettings settings); /** * @return Whether timestamps should be validated for being withing the time range of an index. diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 5aaaf7dce83c9..f74d58093a7f5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -499,7 +499,7 @@ private void indexValue(DocumentParserContext context, Boolean value) { } if (fieldType().isDimension()) { - context.getDimensions().addBoolean(fieldType().name(), value).validate(context.indexSettings()); + context.getRoutingFields().addBoolean(fieldType().name(), value); } if (indexed) { context.doc().add(new StringField(fieldType().name(), value ? Values.TRUE : Values.FALSE, Field.Store.NO)); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java deleted file mode 100644 index 8f26d21324d9b..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentDimensions.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.index.IndexSettings; - -import java.net.InetAddress; - -/** - * Collects dimensions from documents. - */ -public interface DocumentDimensions { - - /** - * Build an index's DocumentDimensions using its settings - */ - static DocumentDimensions fromIndexSettings(IndexSettings indexSettings) { - return indexSettings.getMode().buildDocumentDimensions(indexSettings); - } - - /** - * This overloaded method tries to take advantage of the fact that the UTF-8 - * value is already computed in some cases when we want to collect - * dimensions, so we can save re-computing the UTF-8 encoding. 
- */ - DocumentDimensions addString(String fieldName, BytesRef utf8Value); - - default DocumentDimensions addString(String fieldName, String value) { - return addString(fieldName, new BytesRef(value)); - } - - DocumentDimensions addIp(String fieldName, InetAddress value); - - DocumentDimensions addLong(String fieldName, long value); - - DocumentDimensions addUnsignedLong(String fieldName, long value); - - DocumentDimensions addBoolean(String fieldName, boolean value); - - DocumentDimensions validate(IndexSettings settings); - - /** - * Noop implementation that doesn't perform validations on dimension fields - */ - enum Noop implements DocumentDimensions { - - INSTANCE; - - @Override - public DocumentDimensions addString(String fieldName, BytesRef utf8Value) { - return this; - } - - @Override - public DocumentDimensions addString(String fieldName, String value) { - return this; - } - - @Override - public DocumentDimensions addIp(String fieldName, InetAddress value) { - return this; - } - - @Override - public DocumentDimensions addLong(String fieldName, long value) { - return this; - } - - @Override - public DocumentDimensions addUnsignedLong(String fieldName, long value) { - return this; - } - - @Override - public DocumentDimensions addBoolean(String fieldName, boolean value) { - return this; - } - - @Override - public DocumentDimensions validate(IndexSettings settings) { - return this; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index c84df68a637e2..51e4e9f4c1b5e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -126,7 +126,7 @@ private enum Scope { private final DynamicMapperSize dynamicMappersSize; private final Map dynamicObjectMappers; private final Map> dynamicRuntimeFields; - private final DocumentDimensions dimensions; + private final RoutingFields routingFields; private final ObjectMapper parent; private final ObjectMapper.Dynamic dynamic; private String id; @@ -158,7 +158,7 @@ private DocumentParserContext( String id, Field version, SeqNoFieldMapper.SequenceIDFields seqID, - DocumentDimensions dimensions, + RoutingFields routingFields, ObjectMapper parent, ObjectMapper.Dynamic dynamic, Set fieldsAppliedFromTemplates, @@ -178,7 +178,7 @@ private DocumentParserContext( this.id = id; this.version = version; this.seqID = seqID; - this.dimensions = dimensions; + this.routingFields = routingFields; this.parent = parent; this.dynamic = dynamic; this.fieldsAppliedFromTemplates = fieldsAppliedFromTemplates; @@ -201,7 +201,7 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.id, in.version, in.seqID, - in.dimensions, + in.routingFields, parent, dynamic, in.fieldsAppliedFromTemplates, @@ -231,7 +231,7 @@ protected DocumentParserContext( null, null, SeqNoFieldMapper.SequenceIDFields.emptySeqID(), - DocumentDimensions.fromIndexSettings(mappingParserContext.getIndexSettings()), + RoutingFields.fromIndexSettings(mappingParserContext.getIndexSettings()), parent, dynamic, new HashSet<>(), @@ -762,8 +762,8 @@ public XContentParser parser() { /** * The collection of dimensions for this document. 
*/ - public DocumentDimensions getDimensions() { - return dimensions; + public RoutingFields getRoutingFields() { + return routingFields; } public abstract ContentPath path(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 2efeeba893c6c..09f44f139d8bc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -549,7 +549,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio private void indexValue(DocumentParserContext context, InetAddress address) { if (dimension) { - context.getDimensions().addIp(fieldType().name(), address).validate(context.indexSettings()); + context.getRoutingFields().addIp(fieldType().name(), address); } if (indexed) { Field field = new InetAddressPoint(fieldType().name(), address); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index ecc708bc94614..32aa422b18bcc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -944,7 +944,7 @@ private void indexValue(DocumentParserContext context, String value) { final BytesRef binaryValue = new BytesRef(value); if (fieldType().isDimension()) { - context.getDimensions().addString(fieldType().name(), binaryValue).validate(context.indexSettings()); + context.getRoutingFields().addString(fieldType().name(), binaryValue); } // If the UTF8 encoding of the field value is bigger than the max length 32766, Lucene fill fail the indexing request and, to diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 55ed1e10428aa..8c21dfea31b9a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1991,7 +1991,7 @@ public Number value(XContentParser parser) throws IllegalArgumentException, IOEx */ public void indexValue(DocumentParserContext context, Number numericValue) { if (dimension && numericValue != null) { - context.getDimensions().addLong(fieldType().name(), numericValue.longValue()).validate(context.indexSettings()); + context.getRoutingFields().addLong(fieldType().name(), numericValue.longValue()); } fieldType().type.addFields(context.doc(), fieldType().name(), numericValue, indexed, hasDocValues, stored); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java new file mode 100644 index 0000000000000..4d8d8fdcbd296 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFields.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexSettings; + +import java.net.InetAddress; + +/** + * Collects fields contributing to routing from documents. + */ +public interface RoutingFields { + + /** + * Collect routing fields from index settings + */ + static RoutingFields fromIndexSettings(IndexSettings indexSettings) { + return indexSettings.getMode().buildRoutingFields(indexSettings); + } + + /** + * This overloaded method tries to take advantage of the fact that the UTF-8 + * value is already computed in some cases when we want to collect + * routing fields, so we can save re-computing the UTF-8 encoding. + */ + RoutingFields addString(String fieldName, BytesRef utf8Value); + + default RoutingFields addString(String fieldName, String value) { + return addString(fieldName, new BytesRef(value)); + } + + RoutingFields addIp(String fieldName, InetAddress value); + + RoutingFields addLong(String fieldName, long value); + + RoutingFields addUnsignedLong(String fieldName, long value); + + RoutingFields addBoolean(String fieldName, boolean value); + + /** + * Noop implementation that doesn't perform validations on routing fields + */ + enum Noop implements RoutingFields { + + INSTANCE; + + @Override + public RoutingFields addString(String fieldName, BytesRef utf8Value) { + return this; + } + + @Override + public RoutingFields addString(String fieldName, String value) { + return this; + } + + @Override + public RoutingFields addIp(String fieldName, InetAddress value) { + return this; + } + + @Override + public RoutingFields addLong(String fieldName, long value) { + return this; + } + + @Override + public RoutingFields addUnsignedLong(String fieldName, long value) { + return this; + } + + @Override + public RoutingFields addBoolean(String fieldName, boolean value) { + return this; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java new file mode 100644 index 0000000000000..73baca1bf3fdb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingPathFields.java @@ -0,0 +1,269 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + + package org.elasticsearch.index.mapper; + + import org.apache.lucene.util.BytesRef; + import org.apache.lucene.util.StringHelper; + import org.elasticsearch.cluster.routing.IndexRouting; + import org.elasticsearch.common.bytes.BytesArray; + import org.elasticsearch.common.bytes.BytesReference; + import org.elasticsearch.common.hash.Murmur3Hasher; + import org.elasticsearch.common.hash.MurmurHash3; + import org.elasticsearch.common.io.stream.BytesStreamOutput; + import org.elasticsearch.common.io.stream.StreamInput; + import org.elasticsearch.common.io.stream.StreamOutput; + import org.elasticsearch.common.network.NetworkAddress; + import org.elasticsearch.common.util.ByteUtils; + import org.elasticsearch.core.Nullable; + import org.elasticsearch.search.DocValueFormat; + + import java.io.IOException; + import java.net.InetAddress; + import java.util.ArrayList; + import java.util.Collections; + import java.util.LinkedHashMap; + import java.util.List; + import java.util.Map; + import java.util.SortedMap; + import java.util.TreeMap; + + /** + * Implementation of routing fields, using field matching based on the routing path content. + */ + public final class RoutingPathFields implements RoutingFields { + + private static final int SEED = 0; + + private static final int MAX_ROUTING_FIELDS = 512; + + private static final int MAX_HASH_LEN_BYTES = 2; + static { + assert MAX_HASH_LEN_BYTES == StreamOutput.putVInt(new byte[2], hashLen(MAX_ROUTING_FIELDS), 0); + } + + /** + * A map of the serialized values of routing fields that will be used + * for generating the _tsid field. The map will be used by {@link RoutingPathFields} + * to build the _tsid field for the document. + */ + private final SortedMap<BytesRef, List<BytesReference>> routingValues = new TreeMap<>(); + + /** + * Builds the routing. Used for building {@code _id}. If null then skipped. + */ + @Nullable + private final IndexRouting.ExtractFromSource.Builder routingBuilder; + + public RoutingPathFields(@Nullable IndexRouting.ExtractFromSource.Builder routingBuilder) { + this.routingBuilder = routingBuilder; + } + + SortedMap<BytesRef, List<BytesReference>> routingValues() { + return Collections.unmodifiableSortedMap(routingValues); + } + + IndexRouting.ExtractFromSource.Builder routingBuilder() { + return routingBuilder; + } + + /** + * Here we build the hash of the routing values using a similarity function so that we have a result + * with the following pattern: + * + * hash128(concatenate(routing field names)) + + * foreach(routing field value, limit = MAX_ROUTING_FIELDS) { hash32(routing field value) } + + * hash128(concatenate(routing field values)) + * + * The idea is to be able to place 'similar' values close to each other.
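+ * + * For example (illustrative, not part of the original javadoc): with two single-valued routing fields, hashLen(2) = 16 + 16 + 4 * 2 = 40, so the encoded form is a one-byte vint(40) followed by hash128(field names), a 4-byte hash32 of each field's first value, and hash128(all values).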
+     */
+    public BytesReference buildHash() {
+        Murmur3Hasher hasher = new Murmur3Hasher(SEED);
+
+        // NOTE: hash all routing field names
+        int numberOfFields = Math.min(MAX_ROUTING_FIELDS, routingValues.size());
+        int len = hashLen(numberOfFields);
+        // either one or two bytes are occupied by the vint since we're bounded by #MAX_ROUTING_FIELDS
+        byte[] hash = new byte[MAX_HASH_LEN_BYTES + len];
+        int index = StreamOutput.putVInt(hash, len, 0);
+
+        hasher.reset();
+        for (final BytesRef name : routingValues.keySet()) {
+            hasher.update(name.bytes);
+        }
+        index = writeHash128(hasher.digestHash(), hash, index);
+
+        // NOTE: concatenate all routing field value hashes up to a certain number of fields
+        int startIndex = index;
+        for (final List<BytesReference> values : routingValues.values()) {
+            if ((index - startIndex) >= 4 * numberOfFields) {
+                break;
+            }
+            assert values.isEmpty() == false : "routing values are empty";
+            final BytesRef routingValue = values.get(0).toBytesRef();
+            ByteUtils.writeIntLE(
+                StringHelper.murmurhash3_x86_32(routingValue.bytes, routingValue.offset, routingValue.length, SEED),
+                hash,
+                index
+            );
+            index += 4;
+        }
+
+        // NOTE: hash all routing field values
+        hasher.reset();
+        for (final List<BytesReference> values : routingValues.values()) {
+            for (BytesReference v : values) {
+                hasher.update(v.toBytesRef().bytes);
+            }
+        }
+        index = writeHash128(hasher.digestHash(), hash, index);
+
+        return new BytesArray(hash, 0, index);
+    }
+
+    private static int hashLen(int numberOfFields) {
+        return 16 + 16 + 4 * numberOfFields;
+    }
+
+    private static int writeHash128(final MurmurHash3.Hash128 hash128, byte[] buffer, int index) {
+        ByteUtils.writeLongLE(hash128.h1, buffer, index);
+        index += 8;
+        ByteUtils.writeLongLE(hash128.h2, buffer, index);
+        index += 8;
+        return index;
+    }
+
+    @Override
+    public RoutingFields addString(String fieldName, BytesRef utf8Value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.write((byte) 's');
+            /*
+             * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish
+             * so it's easier for folks to reason about the space taken up. Mostly
+             * it'll be smaller too.
+             */
+            out.writeBytesRef(utf8Value);
+            add(fieldName, out.bytes());
+
+            if (routingBuilder != null) {
+                routingBuilder.addMatching(fieldName, utf8Value);
+            }
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+        return this;
+    }
+
+    @Override
+    public RoutingFields addIp(String fieldName, InetAddress value) {
+        return addString(fieldName, NetworkAddress.format(value));
+    }
+
+    @Override
+    public RoutingFields addLong(String fieldName, long value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.write((byte) 'l');
+            out.writeLong(value);
+            add(fieldName, out.bytes());
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+        return this;
+    }
+
+    @Override
+    public RoutingFields addUnsignedLong(String fieldName, long value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value);
+            if (ul instanceof Long l) {
+                out.write((byte) 'l');
+                out.writeLong(l);
+            } else {
+                out.write((byte) 'u');
+                out.writeLong(value);
+            }
+            add(fieldName, out.bytes());
+            return this;
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+    }
+
+    @Override
+    public RoutingFields addBoolean(String fieldName, boolean value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.write((byte) 'b');
+            out.write(value ? 't' : 'f');
+            add(fieldName, out.bytes());
+        } catch (IOException e) {
+            throw new IllegalArgumentException("Routing field cannot be serialized.", e);
+        }
+        return this;
+    }
+
+    private void add(String fieldName, BytesReference encoded) throws IOException {
+        BytesRef name = new BytesRef(fieldName);
+        List<BytesReference> values = routingValues.get(name);
+        if (values == null) {
+            // optimize for the common case where routing fields are not multi-valued
+            routingValues.put(name, List.of(encoded));
+        } else {
+            if (values.size() == 1) {
+                // converts the immutable list that's optimized for the common case of having only one value to a mutable list
+                BytesReference previousValue = values.get(0);
+                values = new ArrayList<>(4);
+                values.add(previousValue);
+                routingValues.put(name, values);
+            }
+            values.add(encoded);
+        }
+    }
+
+    public static Map<String, Object> decodeAsMap(BytesRef bytesRef) {
+        try (StreamInput in = new BytesArray(bytesRef).streamInput()) {
+            int size = in.readVInt();
+            Map<String, Object> result = new LinkedHashMap<>(size);
+
+            for (int i = 0; i < size; i++) {
+                String name = null;
+                try {
+                    name = in.readSlicedBytesReference().utf8ToString();
+                } catch (AssertionError ae) {
+                    throw new IllegalArgumentException("Error parsing routing field: " + ae.getMessage(), ae);
+                }
+
+                int type = in.read();
+                switch (type) {
+                    case (byte) 's' -> {
+                        // parse a string
+                        try {
+                            result.put(name, in.readSlicedBytesReference().utf8ToString());
+                        } catch (AssertionError ae) {
+                            throw new IllegalArgumentException("Error parsing routing field: " + ae.getMessage(), ae);
+                        }
+                    }
+                    case (byte) 'l' -> // parse a long
+                        result.put(name, in.readLong());
+                    case (byte) 'u' -> { // parse an unsigned_long
+                        Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(in.readLong());
+                        result.put(name, ul);
+                    }
+                    case (byte) 'd' -> // parse a double
+                        result.put(name, in.readDouble());
+                    case (byte) 'b' -> // parse a boolean
+                        result.put(name, in.read() == 't');
+                    default -> throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]");
+                }
+            }
+            return result;
+        } catch
(IOException | IllegalArgumentException e) { + throw new IllegalArgumentException("Routing field cannot be deserialized:" + e.getMessage(), e); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index a6b2ad265decf..8af3c3e6ec270 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -12,21 +12,11 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.StringHelper; -import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.hash.Murmur3Hasher; -import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.util.ByteUtils; -import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexMode; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.fielddata.FieldData; @@ -40,15 +30,10 @@ import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import java.io.IOException; -import java.net.InetAddress; import java.time.ZoneId; -import java.util.ArrayList; import java.util.Collections; -import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; import java.util.SortedMap; -import java.util.TreeMap; /** * Mapper for {@code _tsid} field included generated when the index is @@ -136,15 +121,24 @@ private TimeSeriesIdFieldMapper() { public void postParse(DocumentParserContext context) throws IOException { assert fieldType().isIndexed() == false; - final TimeSeriesIdBuilder timeSeriesIdBuilder = (TimeSeriesIdBuilder) context.getDimensions(); - final BytesRef timeSeriesId = getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ID_HASHING) - ? timeSeriesIdBuilder.buildLegacyTsid().toBytesRef() - : timeSeriesIdBuilder.buildTsidHash().toBytesRef(); + final RoutingPathFields routingPathFields = (RoutingPathFields) context.getRoutingFields(); + final BytesRef timeSeriesId; + if (getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ID_HASHING)) { + long limit = context.indexSettings().getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING); + int size = routingPathFields.routingValues().size(); + if (size > limit) { + throw new MapperException("Too many dimension fields [" + size + "], max [" + limit + "] dimension fields allowed"); + } + timeSeriesId = buildLegacyTsid(routingPathFields).toBytesRef(); + } else { + timeSeriesId = routingPathFields.buildHash().toBytesRef(); + } context.doc().add(new SortedDocValuesField(fieldType().name(), timeSeriesId)); + TsidExtractingIdFieldMapper.createField( context, getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID) - ? timeSeriesIdBuilder.routingBuilder + ? 
routingPathFields.routingBuilder() : null, timeSeriesId ); @@ -170,231 +164,6 @@ public static Object encodeTsid(StreamInput in) { } } - public static class TimeSeriesIdBuilder implements DocumentDimensions { - - private static final int SEED = 0; - - public static final int MAX_DIMENSIONS = 512; - - private final Murmur3Hasher tsidHasher = new Murmur3Hasher(0); - - /** - * A map of the serialized values of dimension fields that will be used - * for generating the _tsid field. The map will be used by {@link TimeSeriesIdFieldMapper} - * to build the _tsid field for the document. - */ - private final SortedMap> dimensions = new TreeMap<>(); - /** - * Builds the routing. Used for building {@code _id}. If null then skipped. - */ - @Nullable - private final IndexRouting.ExtractFromSource.Builder routingBuilder; - - public TimeSeriesIdBuilder(@Nullable IndexRouting.ExtractFromSource.Builder routingBuilder) { - this.routingBuilder = routingBuilder; - } - - public BytesReference buildLegacyTsid() throws IOException { - if (dimensions.isEmpty()) { - throw new IllegalArgumentException("Dimension fields are missing."); - } - - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.writeVInt(dimensions.size()); - for (Map.Entry> entry : dimensions.entrySet()) { - out.writeBytesRef(entry.getKey()); - List value = entry.getValue(); - if (value.size() > 1) { - // multi-value dimensions are only supported for newer indices that use buildTsidHash - throw new IllegalArgumentException( - "Dimension field [" + entry.getKey().utf8ToString() + "] cannot be a multi-valued field." - ); - } - assert value.isEmpty() == false : "dimension value is empty"; - value.get(0).writeTo(out); - } - return out.bytes(); - } - } - - private static final int MAX_HASH_LEN_BYTES = 2; - - static { - assert MAX_HASH_LEN_BYTES == StreamOutput.putVInt(new byte[2], tsidHashLen(MAX_DIMENSIONS), 0); - } - - /** - * Here we build the hash of the tsid using a similarity function so that we have a result - * with the following pattern: - * - * hash128(catenate(dimension field names)) + - * foreach(dimension field value, limit = MAX_DIMENSIONS) { hash32(dimension field value) } + - * hash128(catenate(dimension field values)) - * - * The idea is to be able to place 'similar' time series close to each other. Two time series - * are considered 'similar' if they share the same dimensions (names and values). 
- */ - public BytesReference buildTsidHash() { - // NOTE: hash all dimension field names - int numberOfDimensions = Math.min(MAX_DIMENSIONS, dimensions.size()); - int len = tsidHashLen(numberOfDimensions); - // either one or two bytes are occupied by the vint since we're bounded by #MAX_DIMENSIONS - byte[] tsidHash = new byte[MAX_HASH_LEN_BYTES + len]; - int tsidHashIndex = StreamOutput.putVInt(tsidHash, len, 0); - - tsidHasher.reset(); - for (final BytesRef name : dimensions.keySet()) { - tsidHasher.update(name.bytes); - } - tsidHashIndex = writeHash128(tsidHasher.digestHash(), tsidHash, tsidHashIndex); - - // NOTE: concatenate all dimension value hashes up to a certain number of dimensions - int tsidHashStartIndex = tsidHashIndex; - for (final List values : dimensions.values()) { - if ((tsidHashIndex - tsidHashStartIndex) >= 4 * numberOfDimensions) { - break; - } - assert values.isEmpty() == false : "dimension values are empty"; - final BytesRef dimensionValueBytesRef = values.get(0).toBytesRef(); - ByteUtils.writeIntLE( - StringHelper.murmurhash3_x86_32( - dimensionValueBytesRef.bytes, - dimensionValueBytesRef.offset, - dimensionValueBytesRef.length, - SEED - ), - tsidHash, - tsidHashIndex - ); - tsidHashIndex += 4; - } - - // NOTE: hash all dimension field allValues - tsidHasher.reset(); - for (final List values : dimensions.values()) { - for (BytesReference v : values) { - tsidHasher.update(v.toBytesRef().bytes); - } - } - tsidHashIndex = writeHash128(tsidHasher.digestHash(), tsidHash, tsidHashIndex); - - return new BytesArray(tsidHash, 0, tsidHashIndex); - } - - private static int tsidHashLen(int numberOfDimensions) { - return 16 + 16 + 4 * numberOfDimensions; - } - - private int writeHash128(final MurmurHash3.Hash128 hash128, byte[] buffer, int tsidHashIndex) { - ByteUtils.writeLongLE(hash128.h1, buffer, tsidHashIndex); - tsidHashIndex += 8; - ByteUtils.writeLongLE(hash128.h2, buffer, tsidHashIndex); - tsidHashIndex += 8; - return tsidHashIndex; - } - - @Override - public DocumentDimensions addString(String fieldName, BytesRef utf8Value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 's'); - /* - * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish - * so it's easier for folks to reason about the space taken up. Mostly - * it'll be smaller too. 
- */ - out.writeBytesRef(utf8Value); - add(fieldName, out.bytes()); - - if (routingBuilder != null) { - routingBuilder.addMatching(fieldName, utf8Value); - } - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions addIp(String fieldName, InetAddress value) { - return addString(fieldName, NetworkAddress.format(value)); - } - - @Override - public DocumentDimensions addLong(String fieldName, long value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 'l'); - out.writeLong(value); - add(fieldName, out.bytes()); - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions addUnsignedLong(String fieldName, long value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(value); - if (ul instanceof Long l) { - out.write((byte) 'l'); - out.writeLong(l); - } else { - out.write((byte) 'u'); - out.writeLong(value); - } - add(fieldName, out.bytes()); - return this; - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - } - - @Override - public DocumentDimensions addBoolean(String fieldName, boolean value) { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.write((byte) 'b'); - out.write(value ? 't' : 'f'); - add(fieldName, out.bytes()); - } catch (IOException e) { - throw new IllegalArgumentException("Dimension field cannot be serialized.", e); - } - return this; - } - - @Override - public DocumentDimensions validate(final IndexSettings settings) { - if (settings.getIndexVersionCreated().before(IndexVersions.TIME_SERIES_ID_HASHING) - && dimensions.size() > settings.getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING)) { - throw new MapperException( - "Too many dimension fields [" - + dimensions.size() - + "], max [" - + settings.getValue(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING) - + "] dimension fields allowed" - ); - } - return this; - } - - private void add(String fieldName, BytesReference encoded) throws IOException { - BytesRef name = new BytesRef(fieldName); - List values = dimensions.get(name); - if (values == null) { - // optimize for the common case where dimensions are not multi-valued - dimensions.put(name, List.of(encoded)); - } else { - if (values.size() == 1) { - // converts the immutable list that's optimized for the common case of having only one value to a mutable list - BytesReference previousValue = values.get(0); - values = new ArrayList<>(4); - values.add(previousValue); - dimensions.put(name, values); - } - values.add(encoded); - } - } - } - public static Object encodeTsid(final BytesRef bytesRef) { return base64Encode(bytesRef); } @@ -405,53 +174,27 @@ private static String base64Encode(final BytesRef bytesRef) { return Strings.BASE_64_NO_PADDING_URL_ENCODER.encodeToString(bytes); } - public static Map decodeTsidAsMap(BytesRef bytesRef) { - try (StreamInput input = new BytesArray(bytesRef).streamInput()) { - return decodeTsidAsMap(input); - } catch (IOException ex) { - throw new IllegalArgumentException("Dimension field cannot be deserialized.", ex); - } - } - - public static Map decodeTsidAsMap(StreamInput in) { - try { - int size = in.readVInt(); - Map result = new LinkedHashMap<>(size); - - for (int i = 0; i < size; i++) { - String name = null; - 
try {
-                    name = in.readSlicedBytesReference().utf8ToString();
-                } catch (AssertionError ae) {
-                    throw new IllegalArgumentException("Error parsing keyword dimension: " + ae.getMessage(), ae);
-                }
-
-                int type = in.read();
-                switch (type) {
-                    case (byte) 's' -> {
-                        // parse a string
-                        try {
-                            result.put(name, in.readSlicedBytesReference().utf8ToString());
-                        } catch (AssertionError ae) {
-                            throw new IllegalArgumentException("Error parsing keyword dimension: " + ae.getMessage(), ae);
-                        }
-                    }
-                    case (byte) 'l' -> // parse a long
-                        result.put(name, in.readLong());
-                    case (byte) 'u' -> { // parse an unsigned_long
-                        Object ul = DocValueFormat.UNSIGNED_LONG_SHIFTED.format(in.readLong());
-                        result.put(name, ul);
-                    }
-                    case (byte) 'd' -> // parse a double
-                        result.put(name, in.readDouble());
-                    case (byte) 'b' -> // parse a boolean
-                        result.put(name, in.read() == 't');
-                    default -> throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]");
+    public static BytesReference buildLegacyTsid(RoutingPathFields routingPathFields) throws IOException {
+        SortedMap<BytesRef, List<BytesReference>> routingValues = routingPathFields.routingValues();
+        if (routingValues.isEmpty()) {
+            throw new IllegalArgumentException("Dimension fields are missing.");
+        }
+
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.writeVInt(routingValues.size());
+            for (var entry : routingValues.entrySet()) {
+                out.writeBytesRef(entry.getKey());
+                List<BytesReference> value = entry.getValue();
+                if (value.size() > 1) {
+                    // multi-value dimensions are only supported for newer indices that use buildTsidHash
+                    throw new IllegalArgumentException(
+                        "Dimension field [" + entry.getKey().utf8ToString() + "] cannot be a multi-valued field."
+                    );
                 }
+                assert value.isEmpty() == false : "dimension value is empty";
+                value.get(0).writeTo(out);
             }
-            return result;
-        } catch (IOException | IllegalArgumentException e) {
-            throw new IllegalArgumentException("Error formatting " + NAME + ": " + e.getMessage(), e);
+            return out.bytes();
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java
index 351e3149da3df..93ef04ddd159a 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldParser.java
@@ -184,10 +184,7 @@ private void addField(Context context, ContentPath path, String currentName, Str
         final String keyedFieldName = FlattenedFieldParser.extractKey(bytesKeyedValue).utf8ToString();
         if (fieldType.isDimension() && fieldType.dimensions().contains(keyedFieldName)) {
             final BytesRef keyedFieldValue = FlattenedFieldParser.extractValue(bytesKeyedValue);
-            context.documentParserContext()
-                .getDimensions()
-                .addString(rootFieldFullPath + "." + keyedFieldName, keyedFieldValue)
-                .validate(context.documentParserContext().indexSettings());
+            context.documentParserContext().getRoutingFields().addString(rootFieldFullPath + "."
+ keyedFieldName, keyedFieldValue); } } } diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index bdefee988248f..51f52326907eb 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -22,8 +22,8 @@ import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.geometry.utils.Geohash; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import java.io.IOException; @@ -729,7 +729,7 @@ public Object format(BytesRef value) { try { // NOTE: if the tsid is a map of dimension key/value pairs (as it was before introducing // tsid hashing) we just decode the map and return it. - return TimeSeriesIdFieldMapper.decodeTsidAsMap(value); + return RoutingPathFields.decodeAsMap(value); } catch (Exception e) { // NOTE: otherwise the _tsid field is just a hash and we can't decode it return TimeSeriesIdFieldMapper.encodeTsid(value); @@ -760,20 +760,20 @@ private BytesRef parseBytesRefMap(Object value) { } Map m = (Map) value; - TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); + RoutingPathFields routingPathFields = new RoutingPathFields(null); for (Map.Entry entry : m.entrySet()) { String f = entry.getKey().toString(); Object v = entry.getValue(); if (v instanceof String s) { - builder.addString(f, s); + routingPathFields.addString(f, s); } else if (v instanceof Long l) { - builder.addLong(f, l); + routingPathFields.addLong(f, l); } else if (v instanceof Integer i) { - builder.addLong(f, i.longValue()); + routingPathFields.addLong(f, i.longValue()); } else if (v instanceof BigInteger ul) { long ll = UNSIGNED_LONG_SHIFTED.parseLong(ul.toString(), false, () -> 0L); - builder.addUnsignedLong(f, ll); + routingPathFields.addUnsignedLong(f, ll); } else { throw new IllegalArgumentException("Unexpected value in tsid object [" + v + "]"); } @@ -781,7 +781,7 @@ private BytesRef parseBytesRefMap(Object value) { try { // NOTE: we can decode the tsid only if it is not hashed (represented as a map) - return builder.buildLegacyTsid().toBytesRef(); + return TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef(); } catch (IOException e) { throw new IllegalArgumentException(e); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java index b07ec8e7cb683..083efccceec16 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdLoaderTests.java @@ -27,11 +27,7 @@ import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -72,8 +68,6 @@ public void testSynthesizeIdSimple() throws Exception { } public void 
testSynthesizeIdMultipleSegments() throws Exception { - var routingPaths = List.of("dim1"); - var routing = createRouting(routingPaths); var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); @@ -144,8 +138,6 @@ public void testSynthesizeIdMultipleSegments() throws Exception { } public void testSynthesizeIdRandom() throws Exception { - var routingPaths = List.of("dim1"); - var routing = createRouting(routingPaths); var idLoader = IdLoader.createTsIdLoader(null, null); long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); @@ -153,7 +145,6 @@ public void testSynthesizeIdRandom() throws Exception { List randomDocs = new ArrayList<>(); int numberOfTimeSeries = randomIntBetween(8, 64); for (int i = 0; i < numberOfTimeSeries; i++) { - long routingId = 0; int numberOfDimensions = randomIntBetween(1, 6); List dimensions = new ArrayList<>(numberOfDimensions); for (int j = 1; j <= numberOfDimensions; j++) { @@ -165,7 +156,6 @@ public void testSynthesizeIdRandom() throws Exception { value = randomAlphaOfLength(4); } dimensions.add(new Dimension(fieldName, value)); - routingId = value.hashCode(); } int numberOfSamples = randomIntBetween(1, 16); for (int j = 0; j < numberOfSamples; j++) { @@ -225,21 +215,21 @@ private void prepareIndexReader( } private static void indexDoc(IndexWriter iw, Doc doc, int routingHash) throws IOException { - final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingFields = new RoutingPathFields(null); final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, doc.timestamp)); for (Dimension dimension : doc.dimensions) { if (dimension.value instanceof Number n) { - builder.addLong(dimension.field, n.longValue()); + routingFields.addLong(dimension.field, n.longValue()); fields.add(new SortedNumericDocValuesField(dimension.field, ((Number) dimension.value).longValue())); } else { - builder.addString(dimension.field, dimension.value.toString()); + routingFields.addString(dimension.field, dimension.value.toString()); fields.add(new SortedSetDocValuesField(dimension.field, new BytesRef(dimension.value.toString()))); } } - BytesRef tsid = builder.buildTsidHash().toBytesRef(); + BytesRef tsid = routingFields.buildHash().toBytesRef(); fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, tsid)); fields.add( new SortedDocValuesField( @@ -251,25 +241,15 @@ private static void indexDoc(IndexWriter iw, Doc doc, int routingHash) throws IO } private static String expectedId(Doc doc, int routingHash) throws IOException { - var timeSeriesIdBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingFields = new RoutingPathFields(null); for (Dimension dimension : doc.dimensions) { if (dimension.value instanceof Number n) { - timeSeriesIdBuilder.addLong(dimension.field, n.longValue()); + routingFields.addLong(dimension.field, n.longValue()); } else { - timeSeriesIdBuilder.addString(dimension.field, dimension.value.toString()); + routingFields.addString(dimension.field, dimension.value.toString()); } } - return TsidExtractingIdFieldMapper.createId(routingHash, timeSeriesIdBuilder.buildTsidHash().toBytesRef(), doc.timestamp); - } - - private static IndexRouting.ExtractFromSource createRouting(List 
routingPaths) { - var settings = indexSettings(IndexVersion.current(), 2, 1).put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-01T00:00:00.000Z") - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2001-01-01T00:00:00.000Z") - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPaths) - .build(); - var indexMetadata = IndexMetadata.builder("index").settings(settings).build(); - return (IndexRouting.ExtractFromSource) IndexRouting.fromIndexMetadata(indexMetadata); + return TsidExtractingIdFieldMapper.createId(routingHash, routingFields.buildHash().toBytesRef(), doc.timestamp); } record Doc(long timestamp, List dimensions) {} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java new file mode 100644 index 0000000000000..2c2c0d160c904 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/RoutingPathFieldsTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.IndexRouting; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ESTestCase; + +public class RoutingPathFieldsTests extends ESTestCase { + + public void testWithBuilder() throws Exception { + IndexSettings settings = new IndexSettings( + IndexMetadata.builder("test") + .settings( + indexSettings(IndexVersion.current(), 1, 1).put( + Settings.builder().put("index.mode", "time_series").put("index.routing_path", "path.*").build() + ) + ) + .build(), + Settings.EMPTY + ); + IndexRouting.ExtractFromSource routing = (IndexRouting.ExtractFromSource) settings.getIndexRouting(); + + var routingPathFields = new RoutingPathFields(routing.builder()); + BytesReference current, previous; + + routingPathFields.addString("path.string_name", randomAlphaOfLengthBetween(1, 10)); + current = previous = routingPathFields.buildHash(); + assertNotNull(current); + + routingPathFields.addBoolean("path.boolean_name", randomBoolean()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addLong("path.long_name", randomLong()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addIp("path.ip_name", randomIp(randomBoolean())); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addUnsignedLong("path.unsigned_long_name", randomLongBetween(0, Long.MAX_VALUE)); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + assertArrayEquals(current.array(), routingPathFields.buildHash().array()); + } + + public void 
testWithoutBuilder() throws Exception { + var routingPathFields = new RoutingPathFields(null); + BytesReference current, previous; + + routingPathFields.addString("path.string_name", randomAlphaOfLengthBetween(1, 10)); + current = previous = routingPathFields.buildHash(); + assertNotNull(current); + + routingPathFields.addBoolean("path.boolean_name", randomBoolean()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addLong("path.long_name", randomLong()); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addIp("path.ip_name", randomIp(randomBoolean())); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + previous = current; + + routingPathFields.addUnsignedLong("path.unsigned_long_name", randomLongBetween(0, Long.MAX_VALUE)); + current = routingPathFields.buildHash(); + assertTrue(current.length() > previous.length()); + assertArrayEquals(current.array(), routingPathFields.buildHash().array()); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index 5371893993318..e81066a731d2e 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.mapper.DateFieldMapper.Resolution; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.test.ESTestCase; @@ -379,11 +379,11 @@ public void testParseZone() { } public void testParseTsid() throws IOException { - TimeSeriesIdBuilder timeSeriesIdBuilder = new TimeSeriesIdBuilder(null); - timeSeriesIdBuilder.addString("string", randomAlphaOfLength(10)); - timeSeriesIdBuilder.addLong("long", randomLong()); - timeSeriesIdBuilder.addUnsignedLong("ulong", randomLong()); - BytesRef expected = timeSeriesIdBuilder.buildTsidHash().toBytesRef(); + var routingFields = new RoutingPathFields(null); + routingFields.addString("string", randomAlphaOfLength(10)); + routingFields.addLong("long", randomLong()); + routingFields.addUnsignedLong("ulong", randomLong()); + BytesRef expected = routingFields.buildHash().toBytesRef(); byte[] expectedBytes = new byte[expected.length]; System.arraycopy(expected.bytes, 0, expectedBytes, 0, expected.length); BytesRef actual = DocValueFormat.TIME_SERIES_ID.parseBytesRef(expected); diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java index 3c7a18de536bc..e684092099948 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; 
-import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.AggregatorTestCase; @@ -176,9 +176,9 @@ private List docs(long startTimestamp, String dim, long... values) thr } private static BytesReference tsid(String dim) throws IOException { - TimeSeriesIdFieldMapper.TimeSeriesIdBuilder idBuilder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); - idBuilder.addString("dim", dim); - return idBuilder.buildTsidHash(); + var routingFields = new RoutingPathFields(null); + routingFields.addString("dim", dim); + return routingFields.buildHash(); } private Document doc(long timestamp, BytesReference tsid, long counterValue, String dim) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index b126ca8af0e31..4863eea5d5ca3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.hamcrest.Matcher; import org.junit.After; @@ -363,12 +364,12 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); - final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); + var routingPathFields = new RoutingPathFields(null); for (int i = 0; i < dimensions.length; i += 2) { if (dimensions[i + 1] instanceof Number n) { - builder.addLong(dimensions[i].toString(), n.longValue()); + routingPathFields.addLong(dimensions[i].toString(), n.longValue()); } else { - builder.addString(dimensions[i].toString(), dimensions[i + 1].toString()); + routingPathFields.addString(dimensions[i].toString(), dimensions[i + 1].toString()); fields.add(new SortedSetDocValuesField(dimensions[i].toString(), new BytesRef(dimensions[i + 1].toString()))); } } @@ -382,7 +383,9 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens } } // Use legacy tsid to make tests easier to understand: - fields.add(new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildLegacyTsid().toBytesRef())); + fields.add( + new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.buildLegacyTsid(routingPathFields).toBytesRef()) + ); iw.addDocument(fields); } } diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index b43d87c17e644..ec04bfdd058f9 100644 --- 
a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -645,7 +645,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } if (dimension && numericValue != null) { - context.getDimensions().addUnsignedLong(fieldType().name(), numericValue).validate(context.indexSettings()); + context.getRoutingFields().addUnsignedLong(fieldType().name(), numericValue); } List fields = new ArrayList<>(); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java index 86575d418e605..1a9eb1fde6c87 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregatorTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.RoutingPathFields; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -797,12 +798,12 @@ private void assertGeoLine_TSDB( ArrayList points = testData.pointsForGroup(g); ArrayList timestamps = testData.timestampsForGroup(g); for (int i = 0; i < points.size(); i++) { - final TimeSeriesIdFieldMapper.TimeSeriesIdBuilder builder = new TimeSeriesIdFieldMapper.TimeSeriesIdBuilder(null); - builder.addString("group_id", testData.groups[g]); + var routingFields = new RoutingPathFields(null); + routingFields.addString("group_id", testData.groups[g]); ArrayList fields = new ArrayList<>( Arrays.asList( new SortedDocValuesField("group_id", new BytesRef(testData.groups[g])), - new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, builder.buildTsidHash().toBytesRef()) + new SortedDocValuesField(TimeSeriesIdFieldMapper.NAME, routingFields.buildHash().toBytesRef()) ) ); GeoPoint point = points.get(i); From b517abcb07720497086397d25f17e111585f8316 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Nov 2024 11:13:56 -0500 Subject: [PATCH 29/95] Document new ip geolocation fields (#116603) --- docs/changelog/114193.yaml | 5 +++++ docs/changelog/114268.yaml | 5 +++++ docs/changelog/114521.yaml | 5 +++++ docs/reference/ingest/processors/geoip.asciidoc | 16 ++++++++++------ 4 files changed, 25 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/114193.yaml create mode 100644 docs/changelog/114268.yaml create mode 100644 docs/changelog/114521.yaml diff --git a/docs/changelog/114193.yaml b/docs/changelog/114193.yaml new file mode 100644 index 0000000000000..f18f9359007b8 --- /dev/null +++ b/docs/changelog/114193.yaml @@ -0,0 +1,5 @@ +pr: 114193 +summary: Add postal_code support to the City and Enterprise databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114268.yaml b/docs/changelog/114268.yaml new file mode 100644 index 0000000000000..5e4457005d7d3 --- /dev/null +++ b/docs/changelog/114268.yaml @@ -0,0 +1,5 @@ +pr: 114268 +summary: Support more maxmind fields in the geoip processor +area: 
Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/114521.yaml b/docs/changelog/114521.yaml new file mode 100644 index 0000000000000..c3a9c7cdd0848 --- /dev/null +++ b/docs/changelog/114521.yaml @@ -0,0 +1,5 @@ +pr: 114521 +summary: Add support for registered country fields for maxmind geoip databases +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 3a9ba58dedbf0..2eff56f87e826 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -51,10 +51,12 @@ field instead. *Depends on what is available in `database_file`: * If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, -and `location`. The fields actually added depend on what has been found and which properties were configured in `properties`. +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, and `continent_name`. The fields actually added depend on what has been found +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, and `continent_name`. The fields actually added depend on what has been found and which properties were configured in `properties`. * If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured @@ -70,10 +72,12 @@ The fields actually added depend on what has been found and which properties wer `organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added depend on what has been found and which properties were configured in `properties`. 
* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, -`country_iso_code`, `country_name`, `continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `timezone`, -`location`, `asn`, `organization_name`, `network`, `hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, `residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and -`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] From bca530ceefcf2d5b13ed7ffc39ae37657abb719d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Iv=C3=A1n=20Cea=20Fontenla?= Date: Mon, 11 Nov 2024 17:19:13 +0100 Subject: [PATCH 30/95] Fix NPE in EnrichLookupService on mixed clusters with <8.14 versions (#116583) Fixes https://github.com/elastic/elasticsearch/issues/116529 Fixes https://github.com/elastic/elasticsearch/issues/116544 --- docs/changelog/116583.yaml | 7 +++++++ .../xpack/esql/enrich/AbstractLookupService.java | 7 ++++++- .../xpack/esql/enrich/EnrichLookupService.java | 6 +++--- 3 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 docs/changelog/116583.yaml diff --git a/docs/changelog/116583.yaml b/docs/changelog/116583.yaml new file mode 100644 index 0000000000000..3dc8337fe5b86 --- /dev/null +++ b/docs/changelog/116583.yaml @@ -0,0 +1,7 @@ +pr: 116583 +summary: Fix NPE in `EnrichLookupService` on mixed clusters with <8.14 versions +area: ES|QL +type: bug +issues: + - 116529 + - 116544 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java index 2419aa83845a8..286ddbaa29a5b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/AbstractLookupService.java @@ -45,6 +45,7 @@ import org.elasticsearch.compute.operator.lookup.MergePositionsOperator; import org.elasticsearch.compute.operator.lookup.QueryList; import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -185,7 +186,7 @@ protected static QueryList termQueryList( return switch (inputDataType) { case IP -> 
QueryList.ipTermQueryList(field, searchExecutionContext, (BytesRefBlock) block); case DATETIME -> QueryList.dateTermQueryList(field, searchExecutionContext, (LongBlock) block); - default -> QueryList.rawTermQueryList(field, searchExecutionContext, block); + case null, default -> QueryList.rawTermQueryList(field, searchExecutionContext, block); }; } @@ -459,6 +460,10 @@ abstract static class Request { abstract static class TransportRequest extends org.elasticsearch.transport.TransportRequest implements IndicesRequest { final String sessionId; final ShardId shardId; + /** + * For mixed clusters with nodes <8.14, this will be null. + */ + @Nullable final DataType inputDataType; final Page inputPage; final List extractFields; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index f24a16bb63697..2d85b46e33a8c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -127,9 +127,9 @@ static TransportRequest readFrom(StreamInput in, BlockFactory blockFactory) thro TaskId parentTaskId = TaskId.readFromStream(in); String sessionId = in.readString(); ShardId shardId = new ShardId(in); - DataType inputDataType = DataType.fromTypeName( - (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) ? in.readString() : "unknown" - ); + DataType inputDataType = (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) + ? DataType.fromTypeName(in.readString()) + : null; String matchType = in.readString(); String matchField = in.readString(); Page inputPage; From 9d8240b6480b5309256149158652da69e46c9acd Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 03:30:08 +1100 Subject: [PATCH 31/95] Mute org.elasticsearch.smoketest.MlWithSecurityIT test {yaml=ml/data_frame_analytics_crud/Test delete given stopped config} #116608 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index f49b303a2bc50..1a8cad4e0b391 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -266,6 +266,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/116555 +- class: org.elasticsearch.smoketest.MlWithSecurityIT + method: test {yaml=ml/data_frame_analytics_crud/Test delete given stopped config} + issue: https://github.com/elastic/elasticsearch/issues/116608 # Examples: # From be43c97ff361eb5c6907fa262e820bdc3ae3c205 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 04:11:17 +1100 Subject: [PATCH 32/95] Mute org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT test {categorize.Categorize} #116434 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 1a8cad4e0b391..ba810037130f7 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -269,6 +269,9 @@ tests: - class: org.elasticsearch.smoketest.MlWithSecurityIT method: test {yaml=ml/data_frame_analytics_crud/Test delete given stopped config} issue: https://github.com/elastic/elasticsearch/issues/116608 +- class: 
org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT + method: test {categorize.Categorize} + issue: https://github.com/elastic/elasticsearch/issues/116434 # Examples: # From 2302cdbe4542db2fdcd1ee8cb395ea00e4f7536a Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Nov 2024 13:52:47 -0500 Subject: [PATCH 33/95] Document new ip_location APIs (#116611) --- docs/changelog/114548.yaml | 5 + ...c => delete-ip-location-database.asciidoc} | 28 +++--- ...idoc => get-ip-location-database.asciidoc} | 30 +++--- docs/reference/ingest/apis/index.asciidoc | 18 ++-- .../ingest/apis/put-geoip-database.asciidoc | 72 --------------- .../apis/put-ip-location-database.asciidoc | 92 +++++++++++++++++++ docs/reference/redirects.asciidoc | 17 +++- 7 files changed, 150 insertions(+), 112 deletions(-) create mode 100644 docs/changelog/114548.yaml rename docs/reference/ingest/apis/{delete-geoip-database.asciidoc => delete-ip-location-database.asciidoc} (52%) rename docs/reference/ingest/apis/{get-geoip-database.asciidoc => get-ip-location-database.asciidoc} (65%) delete mode 100644 docs/reference/ingest/apis/put-geoip-database.asciidoc create mode 100644 docs/reference/ingest/apis/put-ip-location-database.asciidoc diff --git a/docs/changelog/114548.yaml b/docs/changelog/114548.yaml new file mode 100644 index 0000000000000..b9692bcb2d10c --- /dev/null +++ b/docs/changelog/114548.yaml @@ -0,0 +1,5 @@ +pr: 114548 +summary: Support IPinfo database configurations +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/reference/ingest/apis/delete-geoip-database.asciidoc b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc similarity index 52% rename from docs/reference/ingest/apis/delete-geoip-database.asciidoc rename to docs/reference/ingest/apis/delete-ip-location-database.asciidoc index 957e59f0f0de4..c3a10a914d2f4 100644 --- a/docs/reference/ingest/apis/delete-geoip-database.asciidoc +++ b/docs/reference/ingest/apis/delete-ip-location-database.asciidoc @@ -1,30 +1,30 @@ -[[delete-geoip-database-api]] -=== Delete geoip database configuration API +[[delete-ip-location-database-api]] +=== Delete IP geolocation database configuration API ++++ -Delete geoip database configuration +Delete IP geolocation database configuration ++++ -Deletes a geoip database configuration. +Deletes a IP geolocation database configuration. [source,console] ---- -DELETE /_ingest/geoip/database/my-database-id +DELETE /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] -[[delete-geoip-database-api-request]] +[[delete-ip-location-database-api-request]] ==== {api-request-title} -`DELETE /_ingest/geoip/database/` +`DELETE /_ingest/ip_location/database/` -[[delete-geoip-database-api-prereqs]] +[[delete-ip-location-database-api-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` <> to use this API. 
-[[delete-geoip-database-api-path-params]] +[[delete-ip-location-database-api-path-params]] ==== {api-path-parms-title} ``:: @@ -35,21 +35,21 @@ DELETE /_ingest/geoip/database/my-database-id -- -[[delete-geoip-database-api-query-params]] +[[delete-ip-location-database-api-query-params]] ==== {api-query-parms-title} include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -[[delete-geoip-database-api-example]] +[[delete-ip-location-database-api-example]] ==== {api-examples-title} -[[delete-geoip-database-api-specific-ex]] -===== Delete a specific geoip database configuration +[[delete-ip-location-database-api-specific-ex]] +===== Delete a specific IP geolocation database configuration [source,console] ---- -DELETE /_ingest/geoip/database/example-database-id +DELETE /_ingest/ip_location/database/example-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] diff --git a/docs/reference/ingest/apis/get-geoip-database.asciidoc b/docs/reference/ingest/apis/get-ip-location-database.asciidoc similarity index 65% rename from docs/reference/ingest/apis/get-geoip-database.asciidoc rename to docs/reference/ingest/apis/get-ip-location-database.asciidoc index f055e3e759db8..26e9ddc1eee50 100644 --- a/docs/reference/ingest/apis/get-geoip-database.asciidoc +++ b/docs/reference/ingest/apis/get-ip-location-database.asciidoc @@ -1,33 +1,33 @@ -[[get-geoip-database-api]] -=== Get geoip database configuration API +[[get-ip-location-database-api]] +=== Get IP geolocation database configuration API ++++ -Get geoip database configuration +Get IP geolocation database configuration ++++ -Returns information about one or more geoip database configurations. +Returns information about one or more IP geolocation database configurations. [source,console] ---- -GET /_ingest/geoip/database/my-database-id +GET /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] -[[get-geoip-database-api-request]] +[[get-ip-location-database-api-request]] ==== {api-request-title} -`GET /_ingest/geoip/database/` +`GET /_ingest/ip_location/database/` -`GET /_ingest/geoip/database` +`GET /_ingest/ip_location/database` -[[get-geoip-database-api-prereqs]] +[[get-ip-location-database-api-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` <> to use this API. -[[get-geoip-database-api-path-params]] +[[get-ip-location-database-api-path-params]] ==== {api-path-parms-title} ``:: @@ -38,22 +38,22 @@ supported. To get all database configurations, omit this parameter or use `*`. -[[get-geoip-database-api-query-params]] +[[get-ip-location-database-api-query-params]] ==== {api-query-parms-title} include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] -[[get-geoip-database-api-example]] +[[get-ip-location-database-api-example]] ==== {api-examples-title} -[[get-geoip-database-api-specific-ex]] -===== Get information for a specific geoip database configuration +[[get-ip-location-database-api-specific-ex]] +===== Get information for a specific IP geolocation database configuration [source,console] ---- -GET /_ingest/geoip/database/my-database-id +GET /_ingest/ip_location/database/my-database-id ---- // TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] 
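The configuration APIs documented above are plain REST endpoints, so they can be exercised from any HTTP client. As a
minimal sketch, the following Java snippet fetches a single database configuration with the JDK's built-in
`java.net.http` client. The endpoint path comes from the documentation above; the host, port, plain-HTTP transport, and
the `my-database-id` identifier are illustrative assumptions (a secured cluster would additionally need authentication
headers).

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class GetIpLocationDatabaseExample {
    public static void main(String[] args) throws Exception {
        // Fetch one IP geolocation database configuration by id; omit the id
        // (or use "*") to list all configurations, as described above.
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_ingest/ip_location/database/my-database-id"))
            .GET()
            .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode()); // 200 when the configuration exists
        System.out.println(response.body());       // JSON describing the matching configuration(s)
    }
}
----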
diff --git a/docs/reference/ingest/apis/index.asciidoc b/docs/reference/ingest/apis/index.asciidoc index e068f99ea0ad3..35adc47821978 100644 --- a/docs/reference/ingest/apis/index.asciidoc +++ b/docs/reference/ingest/apis/index.asciidoc @@ -25,16 +25,14 @@ Use the following APIs to get statistics about ingest processing: the <>. [discrete] -[[ingest-geoip-database-apis]] -=== Ingest GeoIP Database APIs - -preview::["The commercial IP geolocation database download management APIs are in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but this feature is not subject to the support SLA of official GA features."] +[[ingest-ip-location-database-apis]] +=== Ingest IP Location Database APIs Use the following APIs to configure and manage commercial IP geolocation database downloads: -* <> to create or update a database configuration -* <> to retrieve a database configuration -* <> to delete a database configuration +* <> to create or update a database configuration +* <> to retrieve a database configuration +* <> to delete a database configuration include::put-pipeline.asciidoc[] include::get-pipeline.asciidoc[] @@ -42,6 +40,6 @@ include::delete-pipeline.asciidoc[] include::simulate-pipeline.asciidoc[] include::simulate-ingest.asciidoc[] include::geoip-stats.asciidoc[] -include::put-geoip-database.asciidoc[] -include::get-geoip-database.asciidoc[] -include::delete-geoip-database.asciidoc[] +include::put-ip-location-database.asciidoc[] +include::get-ip-location-database.asciidoc[] +include::delete-ip-location-database.asciidoc[] diff --git a/docs/reference/ingest/apis/put-geoip-database.asciidoc b/docs/reference/ingest/apis/put-geoip-database.asciidoc deleted file mode 100644 index 311c303002387..0000000000000 --- a/docs/reference/ingest/apis/put-geoip-database.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -[[put-geoip-database-api]] -=== Create or update geoip database configuration API -++++ -Create or update geoip database configuration -++++ - -Creates or updates an IP geolocation database configuration. - -IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, -it is stored as a <> in {es} named `ingest.geoip.downloader.maxmind.license_key`. Only -one Maxmind license key is currently allowed per {es} cluster. A valid license key must be in the secure settings in order -to download from Maxmind. The license key setting does not take effect until all nodes are restarted. - -[source,console] ----- -PUT _ingest/geoip/database/my-database-id -{ - "name": "GeoIP2-Domain", - "maxmind": { - "account_id": "1025402" - } -} ----- -// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] - -[[put-geoip-database-api-request]] -==== {api-request-title} - -`PUT /_ingest/geoip/database/` - -[[put-geoip-database-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the -`manage` <> to use this API. - - -[[put-geoip-database-api-path-params]] -==== {api-path-parms-title} - -``:: -+ -__ -(Required, string) ID of the database configuration to create or update. 
- -[[put-geoip-database-api-query-params]] -==== {api-query-parms-title} - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] - -[[put-geoip-database-api-request-body]] -==== {api-request-body-title} - -// tag::geoip-database-object[] -`name`:: -(Required, string) -The provider-assigned name of the IP geolocation database to download. - -``:: -(Required, a provider object and its associated configuration) -The configuration necessary to identify which IP geolocation provider to use to download -the database, as well as any provider-specific configuration necessary for such downloading. -+ -At present, the only supported provider is `maxmind`, and the maxmind provider -requires that an `account_id` (string) is configured. -// end::geoip-database-object[] - -[[geoip-database-configuration-licensing]] -==== Licensing - -Downloading databases from third party providers is a commercial feature that requires an -appropriate license. For more information, refer to https://www.elastic.co/subscriptions. diff --git a/docs/reference/ingest/apis/put-ip-location-database.asciidoc b/docs/reference/ingest/apis/put-ip-location-database.asciidoc new file mode 100644 index 0000000000000..e42d84752694c --- /dev/null +++ b/docs/reference/ingest/apis/put-ip-location-database.asciidoc @@ -0,0 +1,92 @@ +[[put-ip-location-database-api]] +=== Create or update IP geolocation database configuration API +++++ +Create or update IP geolocation database configuration +++++ + +Creates or updates an IP geolocation database configuration. + +IMPORTANT: The Maxmind `account_id` shown below requires a license key. Because the license key is sensitive information, +it is stored as a <> in {es} named `ingest.geoip.downloader.maxmind.license_key`. Only +one Maxmind license key is currently allowed per {es} cluster. A valid license key must be in the secure settings in order +to download from Maxmind. The license key setting does not take effect until all nodes are restarted or a +<> request is executed. + +[source,console] +---- +PUT _ingest/ip_location/database/my-database-1 +{ + "name": "GeoIP2-Domain", + "maxmind": { + "account_id": "1234567" + } +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + +IMPORTANT: The IPinfo configuration shown below requires a token. Because the token is sensitive information, +it is stored as a <> in {es} named `ingest.ip_location.downloader.ipinfo.token`. Only +one IPinfo token is currently allowed per {es} cluster. A valid token must be in the secure settings in order +to download from IPinfo. The token setting does not take effect until all nodes are restarted or a +<> request is executed. + +[source,console] +---- +PUT _ingest/ip_location/database/my-database-2 +{ + "name": "standard_location", + "ipinfo": { + } +} +---- +// TEST[skip:we don't want to leak the enterprise-geoip-downloader task, which touching these APIs would cause. Therefore, skip this test.] + + +[[put-ip-location-database-api-request]] +==== {api-request-title} + +`PUT /_ingest/ip_location/database/` + +[[put-ip-location-database-api-prereqs]] +==== {api-prereq-title} + +* If the {es} {security-features} are enabled, you must have the +`manage` <> to use this API. + + +[[put-ip-location-database-api-path-params]] +==== {api-path-parms-title} + +``:: ++ +__ +(Required, string) ID of the database configuration to create or update. 
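A sketch of driving the create-or-update endpoint from Java, again with only the JDK HTTP client. The request bodies mirror the Maxmind and IPinfo console examples above; the cluster address and the lack of authentication are assumptions for illustration.

[source,java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PutIpLocationDatabase {
    static void putConfig(HttpClient client, String id, String body) throws Exception {
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:9200/_ingest/ip_location/database/" + id))
            .header("Content-Type", "application/json")
            .PUT(HttpRequest.BodyPublishers.ofString(body))
            .build();
        System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).body());
    }

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Maxmind: the provider object carries the account_id; the license key lives in the secure settings.
        putConfig(client, "my-database-1", "{\"name\": \"GeoIP2-Domain\", \"maxmind\": {\"account_id\": \"1234567\"}}");
        // IPinfo: the provider object is empty; the token also lives in the secure settings.
        putConfig(client, "my-database-2", "{\"name\": \"standard_location\", \"ipinfo\": {}}");
    }
}
----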
+ +[[put-ip-location-database-api-query-params]] ==== {api-query-parms-title} + +include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +[[put-ip-location-database-api-request-body]] ==== {api-request-body-title} + +// tag::ip-location-database-object[] +`name`:: +(Required, string) +The provider-assigned name of the IP geolocation database to download. + +``:: +(Required, a provider object and its associated configuration) +The configuration necessary to identify which IP geolocation provider to use to download +the database, as well as any provider-specific configuration necessary for such downloading. ++ +At present, the only supported providers are `maxmind` and `ipinfo`. The maxmind provider +requires that an `account_id` (string) is configured. The ipinfo provider does not require +additional configuration in the request body. +// end::ip-location-database-object[] + +[[ip-location-database-configuration-licensing]] +==== Licensing + +Downloading databases from third party providers is a commercial feature that requires an +appropriate license. For more information, refer to https://www.elastic.co/subscriptions. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index e0568f500f268..506dff7891ad2 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1926,4 +1926,19 @@ Refer to <>. [role="exclude",id="remote-clusters-privileges"] === Configure roles and users for remote clusters -Refer to <>. \ No newline at end of file +Refer to <>. + +[role="exclude",id="put-geoip-database-api"] +=== Create or update geoip database configuration API + +Refer to <>. + +[role="exclude",id="get-geoip-database-api"] +=== Get geoip database configuration + +Refer to <>. + +[role="exclude",id="delete-geoip-database-api"] +=== Delete geoip database configuration API + +Refer to <>. From f2b38823603125ea40b86866f306540185938ae4 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 11 Nov 2024 19:55:07 +0100 Subject: [PATCH 34/95] Base search queue length on pool size (#116578) Base the queue length on the pool size. The number of tasks enqueued on search is proportional to its size in many cases, so linearly increasing the length of the queue means that a larger pool does not lead to an increased chance of rejection under constant search load.
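To make the new sizing concrete, here is a standalone sketch of the before/after arithmetic. It is not the actual builder code; the pool-size formula below is the long-standing default of `int((allocatedProcessors * 3) / 2) + 1`, and the processor counts are arbitrary examples.

[source,java]
----
public class SearchQueueSizing {
    // Long-standing default search pool size: int((allocatedProcessors * 3) / 2) + 1.
    static int searchPoolSize(int allocatedProcessors) {
        return allocatedProcessors * 3 / 2 + 1;
    }

    public static void main(String[] args) {
        for (int processors : new int[] { 2, 8, 32, 64 }) {
            int poolSize = searchPoolSize(processors);
            // The queue used to be a flat 1000 entries; it now scales with the pool,
            // keeping per-thread queue capacity constant as the node grows.
            System.out.printf("processors=%d pool=%d oldQueue=%d newQueue=%d%n", processors, poolSize, 1000, poolSize * 1000);
        }
    }
}
----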
--- .../threadpool/DefaultBuiltInExecutorBuilders.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java index a97d22a976631..32634043cfc98 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java +++ b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java @@ -68,7 +68,7 @@ public Map getBuilders(Settings settings, int allocated settings, ThreadPool.Names.SEARCH, searchOrGetThreadPoolSize, - 1000, + searchOrGetThreadPoolSize * 1000, new EsExecutors.TaskTrackingConfig(true, searchAutoscalingEWMA) ) ); From b3424ff44cbddb5266516121ced09403424f2c98 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 11 Nov 2024 13:51:24 -0600 Subject: [PATCH 35/95] Excluding data stream indices from IndexDeprecationChecks.oldIndicesCheck() (#116527) --- .../xpack/deprecation/DeprecationChecks.java | 2 +- .../deprecation/DeprecationInfoAction.java | 7 +- .../deprecation/IndexDeprecationChecks.java | 24 +++-- .../DeprecationInfoActionResponseTests.java | 16 ++-- .../IndexDeprecationChecksTests.java | 92 +++++++++++++++++-- 5 files changed, 113 insertions(+), 28 deletions(-) diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index c80f26cda7b36..d13f3cda2a82c 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -90,7 +90,7 @@ private DeprecationChecks() {} NodeDeprecationChecks::checkWatcherBulkConcurrentRequestsSetting ); - static List> INDEX_SETTINGS_CHECKS = List.of( + static List> INDEX_SETTINGS_CHECKS = List.of( IndexDeprecationChecks::oldIndicesCheck, IndexDeprecationChecks::translogRetentionSettingCheck, IndexDeprecationChecks::checkIndexDataPath, diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index cd26e23394e81..87d0bfb93e18c 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -274,7 +274,7 @@ public static DeprecationInfoAction.Response from( IndexNameExpressionResolver indexNameExpressionResolver, Request request, NodesDeprecationCheckResponse nodeDeprecationResponse, - List> indexSettingsChecks, + List> indexSettingsChecks, List> dataStreamChecks, List> clusterSettingsChecks, Map> pluginSettingIssues, @@ -293,7 +293,10 @@ public static DeprecationInfoAction.Response from( Map> indexSettingsIssues = new HashMap<>(); for (String concreteIndex : concreteIndexNames) { IndexMetadata indexMetadata = stateWithSkippedSettingsRemoved.getMetadata().index(concreteIndex); - List singleIndexIssues = filterChecks(indexSettingsChecks, c -> c.apply(indexMetadata)); + List singleIndexIssues = filterChecks( + indexSettingsChecks, + c -> c.apply(indexMetadata, stateWithSkippedSettingsRemoved) + ); if (singleIndexIssues.size() > 0) { indexSettingsIssues.put(concreteIndex, singleIndexIssues); 
} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index 3da32c7f5a4c2..8144d960df2e8 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.deprecation; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.time.DateFormatter; @@ -30,14 +31,15 @@ */ public class IndexDeprecationChecks { - static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata) { + static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata, ClusterState clusterState) { // TODO: this check needs to be revised. It's trivially true right now. IndexVersion currentCompatibilityVersion = indexMetadata.getCompatibilityVersion(); - if (currentCompatibilityVersion.before(IndexVersions.V_7_0_0)) { + // We intentionally exclude indices that are in data streams because they will be picked up by DataStreamDeprecationChecks + if (currentCompatibilityVersion.before(IndexVersions.V_8_0_0) && isNotDataStreamIndex(indexMetadata, clusterState)) { return new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 7.0", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + "breaking-changes-8.0.html", + "Old index with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + currentCompatibilityVersion.toReleaseVersion(), false, null @@ -46,7 +48,11 @@ static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadata) { + private static boolean isNotDataStreamIndex(IndexMetadata indexMetadata, ClusterState clusterState) { + return clusterState.metadata().findDataStreams(indexMetadata.getIndex().getName()).isEmpty(); + } + + static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { final boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexMetadata.getSettings()); if (softDeletesEnabled) { if (IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(indexMetadata.getSettings()) @@ -73,7 +79,7 @@ static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadat return null; } - static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata) { + static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata, ClusterState clusterState) { if (IndexMetadata.INDEX_DATA_PATH_SETTING.exists(indexMetadata.getSettings())) { final String message = String.format( Locale.ROOT, @@ -88,7 +94,7 @@ static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata) { + static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { final String storeType = IndexModule.INDEX_STORE_TYPE_SETTING.get(indexMetadata.getSettings()); if (IndexModule.Type.SIMPLEFS.match(storeType)) { return new DeprecationIssue( @@ -105,7 
+111,7 @@ static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue frozenIndexSettingCheck(IndexMetadata indexMetadata) { + static DeprecationIssue frozenIndexSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { Boolean isIndexFrozen = FrozenEngine.INDEX_FROZEN.get(indexMetadata.getSettings()); if (Boolean.TRUE.equals(isIndexFrozen)) { String indexName = indexMetadata.getIndex().getName(); @@ -195,7 +201,7 @@ static List findInPropertiesRecursively( return issues; } - static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata) { + static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata, ClusterState clusterState) { List fields = new ArrayList<>(); fieldLevelMappingIssue( indexMetadata, diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java index 5750daa8e3673..67950f3b9f623 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java @@ -117,7 +117,9 @@ public void testFrom() throws IOException { boolean dataStreamIssueFound = randomBoolean(); DeprecationIssue foundIssue = createTestDeprecationIssue(); List> clusterSettingsChecks = List.of((s) -> clusterIssueFound ? foundIssue : null); - List> indexSettingsChecks = List.of((idx) -> indexIssueFound ? foundIssue : null); + List> indexSettingsChecks = List.of( + (idx, cs) -> indexIssueFound ? foundIssue : null + ); List> dataStreamChecks = List.of( (ds, cs) -> dataStreamIssueFound ? 
foundIssue : null ); @@ -211,7 +213,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { DeprecationIssue foundIssue1 = createTestDeprecationIssue(metaMap1); DeprecationIssue foundIssue2 = createTestDeprecationIssue(foundIssue1, metaMap2); List> clusterSettingsChecks = Collections.emptyList(); - List> indexSettingsChecks = List.of((idx) -> null); + List> indexSettingsChecks = List.of((idx, cs) -> null); List> dataStreamChecks = List.of((ds, cs) -> null); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( @@ -276,10 +278,12 @@ public void testRemoveSkippedSettings() throws IOException { return null; })); AtomicReference visibleIndexSettings = new AtomicReference<>(); - List> indexSettingsChecks = Collections.unmodifiableList(Arrays.asList((idx) -> { - visibleIndexSettings.set(idx.getSettings()); - return null; - })); + List> indexSettingsChecks = Collections.unmodifiableList( + Arrays.asList((idx, cs) -> { + visibleIndexSettings.set(idx.getSettings()); + return null; + }) + ); AtomicInteger backingIndicesCount = new AtomicInteger(0); List> dataStreamChecks = Collections.unmodifiableList( Arrays.asList((ds, cs) -> { diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 18872d00d54a0..48cbef6831a2b 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -7,8 +7,15 @@ package org.elasticsearch.xpack.deprecation; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamMetadata; +import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -19,39 +26,89 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.collection.IsIterableContainingInOrder.contains; public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { - IndexVersion createdWith = IndexVersion.fromId(1000099); + IndexVersion createdWith = IndexVersion.fromId(7170099); IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings(settings(createdWith)) .numberOfShards(1) .numberOfReplicas(0) .build(); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(indexMetadata, true)) + .build(); DeprecationIssue expected = new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 7.0", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + 
"breaking-changes-8.0.html", + "Old index with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + createdWith.toReleaseVersion(), false, null ); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); assertEquals(singletonList(expected), issues); } + public void testOldIndicesCheckDataStreamIndex() { + IndexVersion createdWith = IndexVersion.fromId(7170099); + IndexMetadata indexMetadata = IndexMetadata.builder(".ds-test") + .settings(settings(createdWith).put("index.hidden", true)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + DataStream dataStream = new DataStream( + randomAlphaOfLength(10), + List.of(indexMetadata.getIndex()), + randomNegativeLong(), + Map.of(), + randomBoolean(), + false, + false, + randomBoolean(), + randomFrom(IndexMode.values()), + null, + randomFrom(DataStreamOptions.EMPTY, DataStreamOptions.FAILURE_STORE_DISABLED, DataStreamOptions.FAILURE_STORE_ENABLED, null), + List.of(), + randomBoolean(), + null + ); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata( + Metadata.builder() + .put(indexMetadata, true) + .customs( + Map.of( + DataStreamMetadata.TYPE, + new DataStreamMetadata( + ImmutableOpenMap.builder(Map.of("my-data-stream", dataStream)).build(), + ImmutableOpenMap.of() + ) + ) + ) + ) + .build(); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); + assertThat(issues.size(), equalTo(0)); + } + public void testTranslogRetentionSettings() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -81,7 +138,10 @@ public void testDefaultTranslogRetentionSettings() { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat(issues, empty()); } @@ -89,7 +149,10 @@ public void testIndexDataPathSetting() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexMetadata.INDEX_DATA_PATH_SETTING.getKey(), createTempDir()); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); final String expectedUrl = 
"https://www.elastic.co/guide/en/elasticsearch/reference/7.13/breaking-changes-7.13.html#deprecate-shared-data-path-setting"; assertThat( @@ -111,7 +174,10 @@ public void testSimpleFSSetting() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "simplefs"); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -133,7 +199,10 @@ public void testFrozenIndex() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(FrozenEngine.INDEX_FROZEN.getKey(), true); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -175,7 +244,10 @@ public void testCamelCaseDeprecation() throws IOException { false, null ); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(simpleIndex)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(simpleIndex, ClusterState.EMPTY_STATE) + ); assertThat(issues, hasItem(expected)); } } From 23e51e2f458a65c521b5a610fb1e60f7b0269228 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:57:08 +1100 Subject: [PATCH 36/95] Mute org.elasticsearch.upgrades.SearchStatesIT testBWCSearchStates #116617 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index ba810037130f7..652472e81b0ec 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -272,6 +272,9 @@ tests: - class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT method: test {categorize.Categorize} issue: https://github.com/elastic/elasticsearch/issues/116434 +- class: org.elasticsearch.upgrades.SearchStatesIT + method: testBWCSearchStates + issue: https://github.com/elastic/elasticsearch/issues/116617 # Examples: # From 82cecbb884c3efe56e527bd11a6e637c70995231 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:57:18 +1100 Subject: [PATCH 37/95] Mute org.elasticsearch.upgrades.SearchStatesIT testCanMatch #116618 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 652472e81b0ec..9fd8b0bd90f36 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -275,6 +275,9 @@ tests: - class: org.elasticsearch.upgrades.SearchStatesIT method: testBWCSearchStates issue: https://github.com/elastic/elasticsearch/issues/116617 +- class: org.elasticsearch.upgrades.SearchStatesIT + method: testCanMatch + issue: https://github.com/elastic/elasticsearch/issues/116618 # Examples: # From ed2ef1ec2aa124e0b829d58df952e08c980488f7 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:13:24 +1100 Subject: [PATCH 38/95] Mute 
org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests test20NoAutoGenerationWhenAutoConfigurationDisabled #116619 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 9fd8b0bd90f36..69df72a522261 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -278,6 +278,9 @@ tests: - class: org.elasticsearch.upgrades.SearchStatesIT method: testCanMatch issue: https://github.com/elastic/elasticsearch/issues/116618 +- class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests + method: test20NoAutoGenerationWhenAutoConfigurationDisabled + issue: https://github.com/elastic/elasticsearch/issues/116619 # Examples: # From 69aae9f4ed4592dde17657ee977da819eeffc722 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:13:47 +1100 Subject: [PATCH 39/95] Mute org.elasticsearch.packaging.test.BootstrapCheckTests test20RunWithBootstrapChecks #116620 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 69df72a522261..e6e68061c9bbc 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -281,6 +281,9 @@ tests: - class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests method: test20NoAutoGenerationWhenAutoConfigurationDisabled issue: https://github.com/elastic/elasticsearch/issues/116619 +- class: org.elasticsearch.packaging.test.BootstrapCheckTests + method: test20RunWithBootstrapChecks + issue: https://github.com/elastic/elasticsearch/issues/116620 # Examples: # From 89467b212edab1402a3ffaa63a3a3373641aef4d Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 11 Nov 2024 16:30:48 -0500 Subject: [PATCH 40/95] Clarify the vector files utilized for preloading (#116488) Adds clarification for vector preloading: which file extension maps to which storage kind, and that quantized vectors are stored in separate files, allowing them to be preloaded individually. closes: https://github.com/elastic/elasticsearch/issues/116273 --- docs/reference/how-to/knn-search.asciidoc | 13 ++++++++++--- docs/reference/index-modules/store.asciidoc | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index e884c01dd3509..60c32cabdb5c1 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -95,13 +95,20 @@ and https://elasticsearch-benchmarks.elastic.co/#tracks/dense_vector[here] some of datasets and configurations that we use for our nightly benchmarks. [discrete] +[[dense-vector-preloading]] include::search-speed.asciidoc[tag=warm-fs-cache] The following file extensions are used for the approximate kNN search: +Each extension is broken down by quantization type. -* `vec` and `veq` for vector values -* `vex` for HNSW graph -* `vem`, `vemf`, and `vemq` for metadata +* `vex` for the HNSW graph +* `vec` for all non-quantized vector values. This includes all element types: `float`, `byte`, and `bit`. +* `veq` for quantized vectors indexed with <>: `int4` or `int8` +* `veb` for binary vectors indexed with <>: `bbq` +* `vem`, `vemf`, `vemq`, and `vemb` for metadata, usually small and not a concern for preloading + +Generally, if you are using a quantized index, you should only preload the relevant quantized values and the HNSW graph. +Preloading the raw vectors is not necessary and might be counterproductive.
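Restating the extension table as code makes the preload guidance easy to apply. The sketch below is just the mapping described above; the quantization names (`int4`, `int8`, `bbq`) come from the docs, and the helper itself is hypothetical.

[source,java]
----
import java.util.List;

public class VectorPreloadExtensions {
    /** Extensions worth preloading for an HNSW index, per the guidance above (illustrative, not library code). */
    static List<String> preloadExtensions(String quantization) {
        return switch (quantization) {
            case "int4", "int8" -> List.of("vex", "veq"); // graph + scalar-quantized vectors
            case "bbq" -> List.of("vex", "veb");          // graph + binary-quantized vectors
            default -> List.of("vex", "vec");             // graph + raw float/byte/bit vectors
        };
    }

    public static void main(String[] args) {
        // e.g. index.store.preload: ["vex", "veq"] for an int8-quantized index
        System.out.println(preloadExtensions("int8"));
    }
}
----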
[discrete] === Reduce the number of index segments diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index 9b30ba9dbde35..aba0850c76437 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -143,8 +143,8 @@ terms dictionaries, postings lists and points, which are the most important parts of the index for search and aggregations. For vector search, you use <>, -you might want to set the setting to vector search files: `["vec", "vex", "vem"]` -("vec" is used for vector values, "vex" – for HNSW graph, "vem" – for metadata). +you might want to set the setting to vector search files. See <> for a detailed +list of the files. Note that this setting can be dangerous on indices that are larger than the size of the main memory of the host, as it would cause the filesystem cache to be From 442da478a3853a0bb1b2adb4858a469757e034c1 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 09:02:11 +1100 Subject: [PATCH 41/95] Mute org.elasticsearch.smoketest.MlWithSecurityIT test {yaml=ml/inference_crud/Test force delete given model referenced by pipeline} #116624 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index e6e68061c9bbc..62f84866ce0a8 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -284,6 +284,9 @@ tests: - class: org.elasticsearch.packaging.test.BootstrapCheckTests method: test20RunWithBootstrapChecks issue: https://github.com/elastic/elasticsearch/issues/116620 +- class: org.elasticsearch.smoketest.MlWithSecurityIT + method: test {yaml=ml/inference_crud/Test force delete given model referenced by pipeline} + issue: https://github.com/elastic/elasticsearch/issues/116624 # Examples: # From 28377cbaa69b264eead574f5b89fb31c902c1110 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Tue, 12 Nov 2024 10:05:20 +1100 Subject: [PATCH 42/95] Mute org.elasticsearch.packaging.test.DockerTests test011SecurityEnabledStatus #116628 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 62f84866ce0a8..3273b203b0982 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -287,6 +287,9 @@ tests: - class: org.elasticsearch.smoketest.MlWithSecurityIT method: test {yaml=ml/inference_crud/Test force delete given model referenced by pipeline} issue: https://github.com/elastic/elasticsearch/issues/116624 +- class: org.elasticsearch.packaging.test.DockerTests + method: test011SecurityEnabledStatus + issue: https://github.com/elastic/elasticsearch/issues/116628 # Examples: # From dd32cb6439691dfe327d0709558bdc62c83a67a1 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 11 Nov 2024 19:55:57 -0500 Subject: [PATCH 43/95] Document new ip_location processor (#116623) --- docs/reference/ingest/processors.asciidoc | 6 +- .../ingest/processors/geoip.asciidoc | 28 +-- .../ingest/processors/ip-location.asciidoc | 225 ++++++++++++++++++ 3 files changed, 243 insertions(+), 16 deletions(-) create mode 100644 docs/reference/ingest/processors/ip-location.asciidoc diff --git a/docs/reference/ingest/processors.asciidoc b/docs/reference/ingest/processors.asciidoc index 8f7cef06d12a0..f4fcc0fc84d0d 100644 --- a/docs/reference/ingest/processors.asciidoc +++ b/docs/reference/ingest/processors.asciidoc @@ -77,7 +77,10 @@ Computes a hash of the document’s content. 
Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. <>:: -Adds information about the geographical location of an IPv4 or IPv6 address. +Adds information about the geographical location of an IPv4 or IPv6 address from a Maxmind database. + +<>:: +Adds information about the geographical location of an IPv4 or IPv6 address from an ip geolocation database. <>:: Calculates the network direction given a source IP address, destination IP address, and a list of internal networks. @@ -245,6 +248,7 @@ include::processors/grok.asciidoc[] include::processors/gsub.asciidoc[] include::processors/html_strip.asciidoc[] include::processors/inference.asciidoc[] +include::processors/ip-location.asciidoc[] include::processors/join.asciidoc[] include::processors/json.asciidoc[] include::processors/kv.asciidoc[] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 2eff56f87e826..78ebe3f5b5ee3 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -13,7 +13,7 @@ ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[ CC BY-SA 4.0 license. It automatically downloads these databases if your nodes can connect to `storage.googleapis.com` domain and either: * `ingest.geoip.downloader.eager.download` is set to true -* your cluster has at least one pipeline with a `geoip` processor +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor {es} automatically downloads updates for these databases from the Elastic GeoIP endpoint: @@ -25,10 +25,10 @@ If your cluster can't connect to the Elastic GeoIP endpoint or you want to manage your own updates, see <>. If you would like to have {es} download database files directly from Maxmind using your own provided -license key, see <>. +license key, see <>. If {es} can't connect to the endpoint for 30 days all updated databases will become -invalid. {es} will stop enriching documents with geoip data and will add `tags: ["_geoip_expired_database"]` +invalid. {es} will stop enriching documents with ip geolocation data and will add `tags: ["_geoip_expired_database"]` field instead. [[using-ingest-geoip]] @@ -40,11 +40,11 @@ field instead. |====== | Name | Required | Default | Description | `field` | yes | - | The field to get the IP address from for the geographical lookup. -| `target_field` | no | geoip | The field that will hold the geographical information looked up from the MaxMind database. -| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). -| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup. +| `target_field` | no | geoip | The field that will hold the geographical information looked up from the database. 
+| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). +| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the ip geolocation lookup. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -| `first_only` | no | `true` | If `true` only first found geoip data will be returned, even if `field` contains array +| `first_only` | no | `true` | If `true`, only the first found ip geolocation data will be returned, even if `field` contains an array | `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. |====== @@ -79,15 +79,13 @@ depend on what has been found and which properties were configured in `propertie `residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and `connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. -preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] - Here is an example that uses the default city database and adds the geographical information to the `geoip` field based on the `ip` field: [source,console] -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -138,7 +136,7 @@ this database is downloaded automatically. So this: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -190,7 +188,7 @@ cannot be found: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -256,7 +254,7 @@ PUT my_ip_locations -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -429,7 +427,7 @@ The `geoip` processor supports the following setting: The maximum number of results that should be cached. Defaults to `1000`. -Note that these settings are node settings and apply to all `geoip` processors, i.e. there is one cache for all defined `geoip` processors. +Note that these settings are node settings and apply to all `geoip` and `ip_location` processors, i.e. there is a single cache for all such processors.
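The shared-cache note is easy to miss: every `geoip` and `ip_location` processor on a node goes through one bounded, node-level cache. A toy LRU along those lines is sketched below — an illustration of the behavior only, not the actual implementation; the `maxEntries` bound plays the role of the node-level cache-size setting (default `1000`).

[source,java]
----
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;

public class SharedIpLookupCache {
    private final Map<String, Object> cache;

    SharedIpLookupCache(int maxEntries) { // maxEntries stands in for the node-level cache size
        this.cache = new LinkedHashMap<String, Object>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<String, Object> eldest) {
                return size() > maxEntries; // evict the least-recently-used entry past the bound
            }
        };
    }

    /** One instance serves every geoip/ip_location processor on the node, so the key includes the database. */
    synchronized Object computeIfAbsent(String database, String ip, Function<String, Object> lookup) {
        return cache.computeIfAbsent(database + "|" + ip, key -> lookup.apply(ip));
    }
}
----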
[[geoip-cluster-settings]] ===== Cluster settings @@ -458,7 +456,7 @@ each node's <> at `$ES_TMPDIR/geoip-databases/IP Location +++++ + +The `ip_location` processor adds information about the geographical location of an +IPv4 or IPv6 address. + +[[ip-location-automatic-updates]] +By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 +ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the +CC BY-SA 4.0 license. It automatically downloads these databases if your nodes can connect to `storage.googleapis.com` domain and either: + +* `ingest.geoip.downloader.eager.download` is set to true +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor + +{es} automatically downloads updates for these databases from the Elastic GeoIP +endpoint: +https://geoip.elastic.co/v1/database?elastic_geoip_service_tos=agree[https://geoip.elastic.co/v1/database]. +To get download statistics for these updates, use the <>. + +If your cluster can't connect to the Elastic GeoIP endpoint or you want to +manage your own updates, see <>. + +If you would like to have {es} download database files directly from Maxmind using your own provided +license key, see <>. + +If {es} can't connect to the endpoint for 30 days all updated databases will become +invalid. {es} will stop enriching documents with ip geolocation data and will add `tags: ["_ip_location_expired_database"]` +field instead. + +[[using-ingest-ip-location]] +==== Using the `ip_location` Processor in a Pipeline + +[[ingest-ip-location-options]] +.`ip-location` options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to get the IP address from for the geographical lookup. +| `target_field` | no | ip_location | The field that will hold the geographical information looked up from the database. +| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). +| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the ip geolocation lookup. +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| `first_only` | no | `true` | If `true`, only the first found ip geolocation data will be returned, even if `field` contains an array +| `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Else, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index.
+|====== + +*Depends on what is available in `database_file`: + +* If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, and `continent_name`. The fields actually added depend on what has been found +and which properties were configured in `properties`. +* If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, +`asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured +in `properties`. +* If the GeoIP2 Anonymous IP database is used, then the following fields may be added under the `target_field`: `ip`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, and `residential_proxy`. The fields actually added +depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Connection Type database is used, then the following fields may be added under the `target_field`: `ip`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Domain database is used, then the following fields may be added under the `target_field`: `ip`, and `domain`. +The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 ISP database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, +`organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added +depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. 
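In other words, `properties` acts as a projection over whatever the database lookup yields. A simplified sketch of that filtering behavior follows (not the processor's actual code; the method and class names are hypothetical), and the official console examples come right after it.

[source,java]
----
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PropertiesProjection {
    /** Keep only the configured properties from a full lookup result, in the configured order. */
    static Map<String, Object> project(Map<String, Object> lookupResult, List<String> properties) {
        Map<String, Object> target = new LinkedHashMap<>();
        for (String property : properties) {
            Object value = lookupResult.get(property);
            if (value != null) { // properties the database did not yield are simply skipped
                target.put(property, value);
            }
        }
        return target;
    }

    public static void main(String[] args) {
        Map<String, Object> full = Map.of("country_iso_code", "SE", "country_name", "Sweden", "city_name", "Linköping");
        // e.g. "properties": ["country_iso_code", "location"] in the processor definition
        System.out.println(project(full, List.of("country_iso_code", "location")));
    }
}
----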
+ +Here is an example that uses the default city database and adds the geographical information to the `ip_location` field based on the `ip` field: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 55, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "ip_location": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE", + "city_name" : "Linköping", + "region_iso_code" : "SE-E", + "region_name" : "Östergötland County", + "location": { "lat": 58.4167, "lon": 15.6167 } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term":1/"_primary_term" : $body._primary_term/] + +Here is an example that uses the default country database and adds the +geographical information to the `geo` field based on the `ip` field. Note that +this database is downloaded automatically. So this: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip", + "target_field" : "geo", + "database_file" : "GeoLite2-Country.mmdb" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +returns this: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 65, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "geo": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + + +Not all IP addresses find geo information from the database. When this +occurs, no `target_field` is inserted into the document.
+ +Here is an example of what documents will be indexed as when information for "80.231.5.0" +cannot be found: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} + +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "80.231.5.0" +} + +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "_index" : "my-index-000001", + "_id" : "my_id", + "_version" : 1, + "_seq_no" : 71, + "_primary_term": 1, + "found" : true, + "_source" : { + "ip" : "80.231.5.0" + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] From eb6d47f0f927a70aeba11a10a10c6527a63a8be1 Mon Sep 17 00:00:00 2001 From: Fang Xing <155562079+fang-xing-esql@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:33:03 -0500 Subject: [PATCH 44/95] [ES|QL] To_DatePeriod and To_TimeDuration return better error messages on union_type fields (#114934) * better error messages with union_type fields --- docs/changelog/114934.yaml | 6 ++++ .../xpack/esql/analysis/Analyzer.java | 11 ++++++++ .../convert/FoldablesConvertFunction.java | 3 +- .../xpack/esql/analysis/VerifierTests.java | 28 +++++++++++++++++++ 4 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/114934.yaml diff --git a/docs/changelog/114934.yaml b/docs/changelog/114934.yaml new file mode 100644 index 0000000000000..68628993b1c80 --- /dev/null +++ b/docs/changelog/114934.yaml @@ -0,0 +1,6 @@ +pr: 114934 +summary: "[ES|QL] To_DatePeriod and To_TimeDuration return better error messages on\ + \ `union_type` fields" +area: ES|QL +type: bug +issues: [] diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 9039177e0643d..9c173795d0ab1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -51,6 +51,7 @@ import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FoldablesConvertFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; @@ -1226,6 +1227,16 @@ private Expression resolveConvertFunction(AbstractConvertFunction convert, List< if (convert.field() instanceof FieldAttribute fa && fa.field() instanceof InvalidMappedField imf) { HashMap typeResolutions = new HashMap<>(); Set supportedTypes = convert.supportedTypes(); + if (convert instanceof FoldablesConvertFunction fcf) { + // FoldablesConvertFunction does not accept fields as inputs, they only accept constants + String unresolvedMessage = "argument of [" + + fcf.sourceText() + + "] must be a constant, received [" + + Expressions.name(fa) + 
+ "]"; + Expression ua = new UnresolvedAttribute(fa.source(), fa.name(), unresolvedMessage); + return fcf.replaceChildren(Collections.singletonList(ua)); + } imf.types().forEach(type -> { if (supportedTypes.contains(type.widenSmallNumeric())) { TypeResolutionKey key = new TypeResolutionKey(fa.name(), type); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java index 6e2b5bb63532d..8f43a6481db07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java @@ -59,7 +59,8 @@ protected final TypeResolution resolveType() { @Override protected final Map factories() { - // TODO if a union type field is provided as an input, the correct error message is not shown, #112668 is a follow up + // This is used by ResolveUnionTypes, which is expected to be applied to ES fields only + // FoldablesConvertFunction takes only constants as inputs, so this is empty return Map.of(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index d6cda4a3a9ff7..0a34d6cd848bb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -244,6 +244,34 @@ public void testUnsupportedAndMultiTypedFields() { + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | where multi_typed is not null", analyzer) ); + + for (String functionName : List.of("to_timeduration", "to_dateperiod")) { + String lineNumber = functionName.equalsIgnoreCase("to_timeduration") ? "47" : "45"; + String errorType = functionName.equalsIgnoreCase("to_timeduration") ? 
"time_duration" : "date_period"; + assertEquals( + "1:" + lineNumber + ": Cannot use field [unsupported] with unsupported type [flattened]", + error("from test* | eval x = now() + " + functionName + "(unsupported)", analyzer) + ); + assertEquals( + "1:" + lineNumber + ": argument of [" + functionName + "(multi_typed)] must be a constant, received [multi_typed]", + error("from test* | eval x = now() + " + functionName + "(multi_typed)", analyzer) + ); + assertThat( + error("from test* | eval x = unsupported, y = now() + " + functionName + "(x)", analyzer), + containsString("1:23: Cannot use field [unsupported] with unsupported type [flattened]") + ); + assertThat( + error("from test* | eval x = multi_typed, y = now() + " + functionName + "(x)", analyzer), + containsString( + "1:48: argument of [" + + functionName + + "(x)] must be [" + + errorType + + " or string], " + + "found value [x] type [unsupported]" + ) + ); + } } public void testRoundFunctionInvalidInputs() { From fa4e950852ed8b37570806c225e87cb7bcd1fe7a Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 12 Nov 2024 07:42:23 +0100 Subject: [PATCH 45/95] Deduplicate non-empty InternalAggregation metadata when deserializing (#116589) --- .../search/aggregations/InternalAggregation.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 38cab1761d409..b829afb0c23b0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -35,7 +35,6 @@ */ public abstract class InternalAggregation implements Aggregation, NamedWriteable { protected final String name; - protected final Map metadata; /** @@ -53,12 +52,14 @@ protected InternalAggregation(String name, Map metadata) { */ protected InternalAggregation(StreamInput in) throws IOException { final String name = in.readString(); + final Map metadata = in.readGenericMap(); if (in instanceof DelayableWriteable.Deduplicator d) { this.name = d.deduplicate(name); + this.metadata = metadata == null || metadata.isEmpty() ? 
metadata : d.deduplicate(metadata); } else { this.name = name; + this.metadata = metadata; } - metadata = in.readGenericMap(); } @Override From bfb30d2e72f9980a1f9d917ad6f1e3acf4bbff00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:42:34 +0100 Subject: [PATCH 46/95] [DOCS] Remove tech preview from bulk create/update/delete roles (#116601) Mark bulk create/update/delete roles GA in 9.0 and 8.17 --- docs/reference/rest-api/security/bulk-create-roles.asciidoc | 1 - docs/reference/rest-api/security/bulk-delete-roles.asciidoc | 1 - 2 files changed, 2 deletions(-) diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index a198f49383907..560e8b74cdd2c 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-put-role]] === Bulk create or update roles API -preview::[] ++++ Bulk create or update roles API ++++ diff --git a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc index a782b5e37fcb9..b9978c89bef3a 100644 --- a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-delete-role]] === Bulk delete roles API -preview::[] ++++ Bulk delete roles API ++++ From f121e09fbbfe5adee3198620dfd3840e2c792297 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Tue, 12 Nov 2024 10:59:20 +0100 Subject: [PATCH 47/95] [DOCS] Connectors 8.16.0 release notes (#115856) --- .../docs/connectors-release-notes.asciidoc | 10 +++- .../connectors-release-notes-8.16.0.asciidoc | 53 +++++++++++++++++++ 2 files changed, 61 insertions(+), 2 deletions(-) create mode 100644 docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc diff --git a/docs/reference/connector/docs/connectors-release-notes.asciidoc b/docs/reference/connector/docs/connectors-release-notes.asciidoc index 723671b049bf2..e1ed082365c00 100644 --- a/docs/reference/connector/docs/connectors-release-notes.asciidoc +++ b/docs/reference/connector/docs/connectors-release-notes.asciidoc @@ -4,7 +4,13 @@ Release notes ++++ -[INFO] +[NOTE] ==== -Prior to version 8.16.0, the connector release notes were published as part of the https://www.elastic.co/guide/en/enterprise-search/current/changelog.html[Enterprise Search documentation]. +Prior to version *8.16.0*, the connector release notes were published as part of the {enterprise-search-ref}/changelog.html[Enterprise Search documentation]. 
==== + +*Release notes*: + +* <> + +include::release-notes/connectors-release-notes-8.16.0.asciidoc[] diff --git a/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc new file mode 100644 index 0000000000000..7608336073176 --- /dev/null +++ b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc @@ -0,0 +1,53 @@ +[[es-connectors-release-notes-8-16-0]] +=== 8.16.0 connectors release notes + +[discrete] +[[es-connectors-release-notes-deprecation-notice]] +==== Deprecation notices + +* *Direct index access for connectors and sync jobs* ++ +IMPORTANT: Directly accessing connector and sync job state through `.elastic-connectors*` indices is deprecated, and will be disallowed entirely in a future release. + +* Instead, the Elasticsearch Connector APIs should be used. Connectors framework code now uses the <> by default. +See https://github.com/elastic/connectors/pull/2884[*PR 2902*]. + +* *Docker `enterprise-search` namespace deprecation* ++ +IMPORTANT: The `enterprise-search` Docker namespace is deprecated and will be discontinued in a future release. ++ +Starting in `8.16.0`, Docker images are being transitioned to the new `integrations` namespace, which will become the sole location for future releases. This affects the https://github.com/elastic/connectors[Elastic Connectors] and https://github.com/elastic/data-extraction-service[Elastic Data Extraction Service]. ++ +During this transition period, images are published to both namespaces: ++ +** *Example*: ++ +Deprecated namespace:: +`docker.elastic.co/enterprise-search/elastic-connectors:v8.16.0` ++ +New namespace:: +`docker.elastic.co/integrations/elastic-connectors:v8.16.0` ++ +Users should migrate to the new `integrations` namespace as soon as possible to ensure continued access to future releases. + +[discrete] +[[es-connectors-release-notes-8-16-0-enhancements]] +==== Enhancements + +* Docker images now use Chainguard's Wolfi base image (`docker.elastic.co/wolfi/jdk:openjdk-11-dev`), replacing the previous `ubuntu:focal` base. + +* The Sharepoint Online connector now works with the `Sites.Selected` permission instead of the broader permission `Sites.Read.All`. +See https://github.com/elastic/connectors/pull/2762[*PR 2762*]. + +* Starting in 8.16.0, connectors will start using proper SEMVER, with `MAJOR.MINOR.PATCH`, which aligns with Elasticsearch/Kibana versions. This drops the previous `.BUILD` suffix, which we used to release connectors between Elastic stack releases. Going forward, these inter-stack-release releases will be suffixed instead with `+`, aligning with Elastic Agent and conforming to SEMVER. +See https://github.com/elastic/connectors/pull/2749[*PR 2749*]. + +* Connector logs now use UTC timestamps, instead of machine-local timestamps. This only impacts logging output. +See https://github.com/elastic/connectors/pull/2695[*PR 2695*]. + +[discrete] +[[es-connectors-release-notes-8-16-0-bug-fixes]] +==== Bug fixes + +* The Dropbox connector now fetches the files from team shared folders. +See https://github.com/elastic/connectors/pull/2718[*PR 2718*]. 
\ No newline at end of file From d34c5630cae240dafb2134441cf132d4280e1ce7 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 12 Nov 2024 10:43:19 +0000 Subject: [PATCH 48/95] [ML] Avoid the .ml-stats index in post test cleanup (#116476) Fixes ml yaml rest tests failing in the post clean up with a search_phase_execution_exception against the .ml-stats index. The fix is to use another method to find reference ingest pipelines avoid the call to _ml/trained_models/_stats --- muted-tests.yml | 51 ------------------- .../integration/MlRestTestStateCleaner.java | 44 ++++++++-------- 2 files changed, 21 insertions(+), 74 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 3273b203b0982..ddd806d49ae5f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,9 +17,6 @@ tests: - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} - issue: https://github.com/elastic/elasticsearch/issues/112191 - class: org.elasticsearch.xpack.esql.action.ManyShardsIT method: testRejection issue: https://github.com/elastic/elasticsearch/issues/112406 @@ -142,9 +139,6 @@ tests: - class: org.elasticsearch.search.SearchServiceTests method: testParseSourceValidation issue: https://github.com/elastic/elasticsearch/issues/115936 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/115970 - class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT method: testReindexWithShutdown issue: https://github.com/elastic/elasticsearch/issues/115996 @@ -168,48 +162,27 @@ tests: - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116133 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113054 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize ASYNC} issue: https://github.com/elastic/elasticsearch/issues/113055 -- class: org.elasticsearch.xpack.inference.InferenceRestIT - method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} - issue: https://github.com/elastic/elasticsearch/issues/114376 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test force delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116136 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Test start already started transform} issue: https://github.com/elastic/elasticsearch/issues/98802 - class: org.elasticsearch.action.search.SearchPhaseControllerTests method: testProgressListener issue: https://github.com/elastic/elasticsearch/issues/116149 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/forecast/Test forecast unknown job} 
- issue: https://github.com/elastic/elasticsearch/issues/116150 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=terms_enum/10_basic/Test security} issue: https://github.com/elastic/elasticsearch/issues/116178 - class: org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT method: testSearchWithRandomDisconnects issue: https://github.com/elastic/elasticsearch/issues/116175 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/start_stop_datafeed/Test start datafeed given index pattern with no matching indices} - issue: https://github.com/elastic/elasticsearch/issues/116220 - class: org.elasticsearch.search.basic.SearchWhileRelocatingIT method: testSearchAndRelocateConcurrentlyRandomReplicas issue: https://github.com/elastic/elasticsearch/issues/116145 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/filter_crud/Test update filter} - issue: https://github.com/elastic/elasticsearch/issues/116271 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/get_datafeeds/Test explicit get all datafeeds} - issue: https://github.com/elastic/elasticsearch/issues/116284 - class: org.elasticsearch.xpack.deprecation.DeprecationHttpIT method: testDeprecatedSettingsReturnWarnings issue: https://github.com/elastic/elasticsearch/issues/108628 @@ -231,24 +204,9 @@ tests: - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/jobs_crud/Test put job deprecated bucket span} - issue: https://github.com/elastic/elasticsearch/issues/116419 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/explain_data_frame_analytics/Test both job id and body} - issue: https://github.com/elastic/elasticsearch/issues/116433 -- class: org.elasticsearch.smoketest.MlWithSecurityIT - method: test {yaml=ml/inference_crud/Test force delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116443 - class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT method: testILMDownsampleRollingRestart issue: https://github.com/elastic/elasticsearch/issues/114233 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/data_frame_analytics_crud/Test put config with unknown field in outlier detection analysis} - issue: https://github.com/elastic/elasticsearch/issues/116458 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/evaluate_data_frame/Test outlier_detection with query} - issue: https://github.com/elastic/elasticsearch/issues/116484 - class: org.elasticsearch.xpack.kql.query.KqlQueryBuilderTests issue: https://github.com/elastic/elasticsearch/issues/116487 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests @@ -263,12 +221,6 @@ tests: - class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT method: testMatchAllQuery issue: https://github.com/elastic/elasticsearch/issues/116536 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116555 -- class: org.elasticsearch.smoketest.MlWithSecurityIT - method: test {yaml=ml/data_frame_analytics_crud/Test delete given stopped config} - issue: 
https://github.com/elastic/elasticsearch/issues/116608 - class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT method: test {categorize.Categorize} issue: https://github.com/elastic/elasticsearch/issues/116434 @@ -284,9 +236,6 @@ tests: - class: org.elasticsearch.packaging.test.BootstrapCheckTests method: test20RunWithBootstrapChecks issue: https://github.com/elastic/elasticsearch/issues/116620 -- class: org.elasticsearch.smoketest.MlWithSecurityIT - method: test {yaml=ml/inference_crud/Test force delete given model referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116624 - class: org.elasticsearch.packaging.test.DockerTests method: test011SecurityEnabledStatus issue: https://github.com/elastic/elasticsearch/issues/116628 diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java index 6f6224d505327..25d9509ecdc7a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java @@ -10,14 +10,15 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasSize; public class MlRestTestStateCleaner { @@ -30,24 +31,29 @@ public MlRestTestStateCleaner(Logger logger, RestClient adminClient) { } public void resetFeatures() throws IOException { - waitForMlStatsIndexToInitialize(); - deleteAllTrainedModelIngestPipelines(); + deletePipelinesWithInferenceProcessors(); // This resets all features, not just ML, but they should have been getting reset between tests anyway so it shouldn't matter adminClient.performRequest(new Request("POST", "/_features/_reset")); } @SuppressWarnings("unchecked") - private void deleteAllTrainedModelIngestPipelines() throws IOException { - final Request getAllTrainedModelStats = new Request("GET", "/_ml/trained_models/_stats"); - getAllTrainedModelStats.addParameter("size", "10000"); - final Response trainedModelsStatsResponse = adminClient.performRequest(getAllTrainedModelStats); + private void deletePipelinesWithInferenceProcessors() throws IOException { + final Response pipelinesResponse = adminClient.performRequest(new Request("GET", "/_ingest/pipeline")); + final Map pipelines = ESRestTestCase.entityAsMap(pipelinesResponse); + + var pipelinesWithInferenceProcessors = new HashSet(); + for (var entry : pipelines.entrySet()) { + var pipelineDef = (Map) entry.getValue(); // each top level object is a separate pipeline + var processors = (List>) pipelineDef.get("processors"); + for (var processor : processors) { + assertThat(processor.entrySet(), hasSize(1)); + if ("inference".equals(processor.keySet().iterator().next())) { + pipelinesWithInferenceProcessors.add(entry.getKey()); + } + } + } - final List> pipelines = (List>) XContentMapValues.extractValue( - "trained_model_stats.ingest.pipelines", - ESRestTestCase.entityAsMap(trainedModelsStatsResponse) - ); - Set pipelineIds = 
pipelines.stream().flatMap(m -> m.keySet().stream()).collect(Collectors.toSet()); - for (String pipelineId : pipelineIds) { + for (String pipelineId : pipelinesWithInferenceProcessors) { try { adminClient.performRequest(new Request("DELETE", "/_ingest/pipeline/" + pipelineId)); } catch (Exception ex) { @@ -55,12 +61,4 @@ private void deleteAllTrainedModelIngestPipelines() throws IOException { } } } - - private void waitForMlStatsIndexToInitialize() throws IOException { - ESRestTestCase.ensureHealth(adminClient, ".ml-stats-*", (request) -> { - request.addParameter("wait_for_no_initializing_shards", "true"); - request.addParameter("level", "shards"); - request.addParameter("timeout", "30s"); - }); - } } From bcf1bd4c969eebfd1a55dbcd078060ba1522de94 Mon Sep 17 00:00:00 2001 From: Iraklis Psaroudakis Date: Tue, 12 Nov 2024 13:08:04 +0200 Subject: [PATCH 49/95] Ensure Fleet REST yaml tests work in serverless (#115869) Use new gradle plugin for the yaml tests so they can be executed in serverless CI as well. Relates ES-8275 --- .../action/search/SearchRequestBuilder.java | 8 +++++ x-pack/plugin/fleet/qa/rest/build.gradle | 35 +++++++++++++------ .../xpack/fleet/FleetRestIT.java | 24 +++++++++++-- .../test/fleet/20_wait_for_checkpoints.yml | 5 +-- .../yamlRestTest/resources}/roles.yml | 0 5 files changed, 57 insertions(+), 15 deletions(-) rename x-pack/plugin/fleet/qa/rest/{ => src/yamlRestTest/resources}/roles.yml (100%) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index afbfe129c302e..2927c394da3d4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -130,6 +130,14 @@ public SearchRequestBuilder setWaitForCheckpoints(Map waitForChe return this; } + /** + * Set the timeout for the {@link #setWaitForCheckpoints(Map)} request. + */ + public SearchRequestBuilder setWaitForCheckpointsTimeout(final TimeValue waitForCheckpointsTimeout) { + request.setWaitForCheckpointsTimeout(waitForCheckpointsTimeout); + return this; + } + /** * Specifies what type of requested indices to ignore and wildcard indices expressions. *

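For reference, a minimal sketch of how a caller might combine the existing
`setWaitForCheckpoints` option with the new timeout setter (the index name and
checkpoint value here are illustrative, not taken from this patch):

[source,java]
----
// Search "my-index" only after its shard copies have processed checkpoint 42,
// bounding the wait at one minute via the new setter.
SearchResponse response = client.prepareSearch("my-index")
    .setWaitForCheckpoints(Map.of("my-index", new long[] { 42 }))
    .setWaitForCheckpointsTimeout(TimeValue.timeValueMinutes(1))
    .setQuery(QueryBuilders.matchAllQuery())
    .get();
----
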
diff --git a/x-pack/plugin/fleet/qa/rest/build.gradle b/x-pack/plugin/fleet/qa/rest/build.gradle index fda9251c7ef34..dec624bc3cc56 100644 --- a/x-pack/plugin/fleet/qa/rest/build.gradle +++ b/x-pack/plugin/fleet/qa/rest/build.gradle @@ -1,8 +1,15 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ -dependencies { - yamlRestTestImplementation(testArtifact(project(xpackModule('core')))) -} +import org.elasticsearch.gradle.internal.info.BuildParams + +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-test-artifact' restResources { restApi { @@ -10,11 +17,17 @@ restResources { } } -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - extraConfigFile 'roles.yml', file('roles.yml') - user username: 'elastic_admin', password: 'admin-password' - user username: 'fleet_unprivileged_secrets', password: 'password', role: 'unprivileged_secrets' +artifacts { + restXpackTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} + +tasks.named('yamlRestTest') { + usesDefaultDistribution() +} +tasks.named('yamlRestCompatTest') { + usesDefaultDistribution() +} +if (BuildParams.inFipsJvm){ + // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC + tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java index 202149abf11e1..bc49649bc1139 100644 --- a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java +++ b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java @@ -12,8 +12,12 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class FleetRestIT extends ESClientYamlSuiteTestCase { @@ -21,14 +25,30 @@ public FleetRestIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); } + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.security.enabled", "true") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user("elastic_admin", "admin-password", "superuser", true) + .user("fleet_unprivileged_secrets", "password", "unprivileged_secrets", true) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restClientSettings() { - String authentication = 
basicAuthHeaderValue("elastic_admin", new SecureString("admin-password".toCharArray()));
-        return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", authentication).build();
+        String token = basicAuthHeaderValue("elastic_admin", new SecureString("admin-password".toCharArray()));
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
     }
 
     @ParametersFactory
     public static Iterable parameters() throws Exception {
         return ESClientYamlSuiteTestCase.createParameters();
     }
+
 }
diff --git a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml
index 5610502a65d23..4c168c8feb0cd 100644
--- a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml
+++ b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml
@@ -105,6 +105,7 @@ setup:
         index: "test-after-refresh"
         allow_partial_search_results: false
         wait_for_checkpoints: 2
+        wait_for_checkpoints_timeout: 1m
         body: { query: { match_all: {} } }
 
 ---
@@ -115,7 +116,7 @@ setup:
         body:
           - { "allow_partial_search_results": false, wait_for_checkpoints: 1 }
           - { query: { match_all: { } } }
-          - { "allow_partial_search_results": false, wait_for_checkpoints: 2 }
+          - { "allow_partial_search_results": false, wait_for_checkpoints: 2, wait_for_checkpoints_timeout: 1m }
           - { query: { match_all: { } } }
 
   - match: { responses.0._shards.successful: 1 }
@@ -128,7 +129,7 @@ setup:
           - {query: { match_all: {} } }
           - { "index": "test-alias", "allow_partial_search_results": false, wait_for_checkpoints: 1 }
           - { query: { match_all: { } } }
-          - {"index": "test-refresh-disabled", "allow_partial_search_results": false, wait_for_checkpoints: 2}
+          - { "index": "test-refresh-disabled", "allow_partial_search_results": false, wait_for_checkpoints: 2, wait_for_checkpoints_timeout: 1m }
           - {query: { match_all: {} } }
 
   - match: { responses.0._shards.successful: 1 }
diff --git a/x-pack/plugin/fleet/qa/rest/roles.yml b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/roles.yml
similarity index 100%
rename from x-pack/plugin/fleet/qa/rest/roles.yml
rename to x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/roles.yml

From cca70d7eff1540fad2ff19081aac6f5f82cc68f5 Mon Sep 17 00:00:00 2001
From: David Kyle
Date: Tue, 12 Nov 2024 11:14:06 +0000
Subject: [PATCH 50/95] [ML] Batch the chunks (#115477)

Models running on an ml node have a queue of requests; when that queue
is full, new requests are rejected. A large document can be chunked into
hundreds of requests, and in extreme cases a single large document can
overflow the queue. Avoid this by batching the chunks and keeping only a
bounded number of requests in flight.
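
The scheduling idea, as a simplified standalone sketch (the real
implementation is the `BatchIterator` class added below, which also wires in
listeners, result translation, and on-demand model deployment):

[source,java]
----
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

// Bounded fan-out: the first task spawns IN_FLIGHT workers, and every
// finished worker starts exactly one more, so no more than IN_FLIGHT
// batches are ever running at the same time.
class BoundedBatchRunner {
    private static final int IN_FLIGHT = 20;

    private final AtomicInteger next = new AtomicInteger();
    private final List<Runnable> batches;
    private final ExecutorService executor = Executors.newFixedThreadPool(IN_FLIGHT);

    BoundedBatchRunner(List<Runnable> batches) {
        this.batches = batches;
    }

    void run() {
        // The first call fans out to IN_FLIGHT parallel workers.
        executor.execute(() -> runBatch(IN_FLIGHT));
    }

    private void runBatch(int fanOut) {
        int i = next.getAndIncrement();
        if (i >= batches.size()) {
            return;
        }
        batches.get(i).run();
        // Each completed batch schedules fanOut follow-up batches.
        for (int j = 0; j < fanOut; j++) {
            executor.execute(() -> runBatch(1));
        }
    }
}
----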
--- .../ElasticsearchInternalService.java | 103 ++++++++++++--- .../EmbeddingRequestChunkerTests.java | 13 ++ .../ElasticsearchInternalServiceTests.java | 122 ++++++++++++++++-- 3 files changed, 205 insertions(+), 33 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 83249266c79ab..fe83acc8574aa 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -68,6 +68,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Stream; @@ -680,25 +681,13 @@ public void chunkedInfer( esModel.getConfigurations().getChunkingSettings() ).batchRequestsWithListeners(listener); - for (var batch : batchedRequests) { - var inferenceRequest = buildInferenceRequest( - esModel.mlNodeDeploymentId(), - EmptyConfigUpdate.INSTANCE, - batch.batch().inputs(), - inputType, - timeout - ); - - ActionListener mlResultsListener = batch.listener() - .delegateFailureAndWrap( - (l, inferenceResult) -> translateToChunkedResult(model.getTaskType(), inferenceResult.getInferenceResults(), l) - ); - - var maybeDeployListener = mlResultsListener.delegateResponse( - (l, exception) -> maybeStartDeployment(esModel, exception, inferenceRequest, mlResultsListener) - ); - - client.execute(InferModelAction.INSTANCE, inferenceRequest, maybeDeployListener); + if (batchedRequests.isEmpty()) { + listener.onResponse(List.of()); + } else { + // Avoid filling the inference queue by executing the batches in series + // Each batch contains up to EMBEDDING_MAX_BATCH_SIZE inference request + var sequentialRunner = new BatchIterator(esModel, inputType, timeout, batchedRequests); + sequentialRunner.run(); } } else { listener.onFailure(notElasticsearchModelException(model)); @@ -1018,6 +1007,82 @@ static TaskType inferenceConfigToTaskType(InferenceConfig config) { } } + /** + * Iterates over the batch executing a limited number requests at a time to avoid + * filling the ML node inference queue. + * + * First, a single request is executed, which can also trigger deploying a model + * if necessary. When this request is successfully executed, a callback executes + * N requests in parallel next. Each of these requests also has a callback that + * executes one more request, so that at all time N requests are in-flight. This + * continues until all requests are executed. 
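+     * As a result, with NUM_REQUESTS_INFLIGHT parallel requests and batches of
+     * up to EMBEDDING_MAX_BATCH_SIZE inputs each, the number of inputs under
+     * inference at any one time is bounded by their product.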
+ */ + class BatchIterator { + private static final int NUM_REQUESTS_INFLIGHT = 20; // * batch size = 200 + + private final AtomicInteger index = new AtomicInteger(); + private final ElasticsearchInternalModel esModel; + private final List requestAndListeners; + private final InputType inputType; + private final TimeValue timeout; + + BatchIterator( + ElasticsearchInternalModel esModel, + InputType inputType, + TimeValue timeout, + List requestAndListeners + ) { + this.esModel = esModel; + this.requestAndListeners = requestAndListeners; + this.inputType = inputType; + this.timeout = timeout; + } + + void run() { + // The first request may deploy the model, and upon completion runs + // NUM_REQUESTS_INFLIGHT in parallel. + inferenceExecutor.execute(() -> inferBatch(NUM_REQUESTS_INFLIGHT, true)); + } + + private void inferBatch(int runAfterCount, boolean maybeDeploy) { + int batchIndex = index.getAndIncrement(); + if (batchIndex >= requestAndListeners.size()) { + return; + } + executeRequest(batchIndex, maybeDeploy, () -> { + for (int i = 0; i < runAfterCount; i++) { + // Subsequent requests may not deploy the model, because the first request + // already did so. Upon completion, it runs one more request. + inferenceExecutor.execute(() -> inferBatch(1, false)); + } + }); + } + + private void executeRequest(int batchIndex, boolean maybeDeploy, Runnable runAfter) { + EmbeddingRequestChunker.BatchRequestAndListener batch = requestAndListeners.get(batchIndex); + var inferenceRequest = buildInferenceRequest( + esModel.mlNodeDeploymentId(), + EmptyConfigUpdate.INSTANCE, + batch.batch().inputs(), + inputType, + timeout + ); + logger.trace("Executing batch index={}", batchIndex); + + ActionListener listener = batch.listener() + .delegateFailureAndWrap( + (l, inferenceResult) -> translateToChunkedResult(esModel.getTaskType(), inferenceResult.getInferenceResults(), l) + ); + if (runAfter != null) { + listener = ActionListener.runAfter(listener, runAfter); + } + if (maybeDeploy) { + listener = listener.delegateResponse((l, exception) -> maybeStartDeployment(esModel, exception, inferenceRequest, l)); + } + client.execute(InferModelAction.INSTANCE, inferenceRequest, listener); + } + } + public static class Configuration { public static InferenceServiceConfiguration get() { return configuration.getOrCompute(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java index c1be537a6b0a7..4fdf254101d3e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java @@ -24,12 +24,25 @@ import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; public class EmbeddingRequestChunkerTests extends ESTestCase { + public void testEmptyInput() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); + assertThat(batches, empty()); + } + + public void testBlankInput() { + var 
embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(""), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); + assertThat(batches, hasSize(1)); + } + public void testShortInputsAreSingleBatch() { String input = "one chunk"; var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 89a27a921cbea..9a4d0dda82238 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; @@ -65,6 +66,7 @@ import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.WordBoundaryChunkingSettings; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.junit.After; import org.junit.Before; @@ -72,12 +74,14 @@ import org.mockito.Mockito; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -832,16 +836,16 @@ public void testParsePersistedConfig() { } } - public void testChunkInfer_E5WithNullChunkingSettings() { + public void testChunkInfer_E5WithNullChunkingSettings() throws InterruptedException { testChunkInfer_e5(null); } - public void testChunkInfer_E5ChunkingSettingsSet() { + public void testChunkInfer_E5ChunkingSettingsSet() throws InterruptedException { testChunkInfer_e5(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { + private void testChunkInfer_e5(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); @@ -889,6 +893,9 @@ private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -897,22 +904,23 @@ private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), 
InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } - public void testChunkInfer_SparseWithNullChunkingSettings() { + public void testChunkInfer_SparseWithNullChunkingSettings() throws InterruptedException { testChunkInfer_Sparse(null); } - public void testChunkInfer_SparseWithChunkingSettingsSet() { + public void testChunkInfer_SparseWithChunkingSettingsSet() throws InterruptedException { testChunkInfer_Sparse(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { + private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); @@ -936,6 +944,7 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { var service = createService(client); var gotResults = new AtomicBoolean(); + var resultsListener = ActionListener.>wrap(chunkedResponse -> { assertThat(chunkedResponse, hasSize(2)); assertThat(chunkedResponse.get(0), instanceOf(InferenceChunkedSparseEmbeddingResults.class)); @@ -955,6 +964,9 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -963,22 +975,23 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } - public void testChunkInfer_ElserWithNullChunkingSettings() { + public void testChunkInfer_ElserWithNullChunkingSettings() throws InterruptedException { testChunkInfer_Elser(null); } - public void testChunkInfer_ElserWithChunkingSettingsSet() { + public void testChunkInfer_ElserWithChunkingSettingsSet() throws InterruptedException { testChunkInfer_Elser(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { + private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); @@ -1022,6 +1035,9 @@ private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -1030,9 +1046,10 @@ private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } @@ -1093,7 +1110,7 @@ public void 
testChunkInferSetsTokenization() { } @SuppressWarnings("unchecked") - public void testChunkInfer_FailsBatch() { + public void testChunkInfer_FailsBatch() throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); @@ -1129,6 +1146,9 @@ public void testChunkInfer_FailsBatch() { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -1137,12 +1157,86 @@ public void testChunkInfer_FailsBatch() { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } + @SuppressWarnings("unchecked") + public void testChunkingLargeDocument() throws InterruptedException { + int numBatches = randomIntBetween(3, 6); + + // how many response objects to return in each batch + int[] numResponsesPerBatch = new int[numBatches]; + for (int i = 0; i < numBatches - 1; i++) { + numResponsesPerBatch[i] = ElasticsearchInternalService.EMBEDDING_MAX_BATCH_SIZE; + } + numResponsesPerBatch[numBatches - 1] = randomIntBetween(1, ElasticsearchInternalService.EMBEDDING_MAX_BATCH_SIZE); + int numChunks = Arrays.stream(numResponsesPerBatch).sum(); + + // build a doc with enough words to make numChunks of chunks + int wordsPerChunk = 10; + int numWords = numChunks * wordsPerChunk; + var input = "word ".repeat(numWords); + + Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + + // mock the inference response + doAnswer(invocationOnMock -> { + var request = (InferModelAction.Request) invocationOnMock.getArguments()[1]; + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + var mlTrainedModelResults = new ArrayList(); + for (int i = 0; i < request.numberOfDocuments(); i++) { + mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); + } + var response = new InferModelAction.Response(mlTrainedModelResults, "foo", true); + listener.onResponse(response); + return null; + }).when(client).execute(same(InferModelAction.INSTANCE), any(InferModelAction.Request.class), any(ActionListener.class)); + + var service = createService(client); + + var gotResults = new AtomicBoolean(); + var resultsListener = ActionListener.>wrap(chunkedResponse -> { + assertThat(chunkedResponse, hasSize(1)); + assertThat(chunkedResponse.get(0), instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var sparseResults = (InferenceChunkedTextEmbeddingFloatResults) chunkedResponse.get(0); + assertThat(sparseResults.chunks(), hasSize(numChunks)); + + gotResults.set(true); + }, ESTestCase::fail); + + // Create model using the word boundary chunker. 
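+        // Sanity of the arithmetic above: the input is "word " repeated
+        // numChunks * wordsPerChunk times, and the chunker below uses
+        // wordsPerChunk words per chunk with no overlap, so exactly numChunks
+        // chunks are produced, spread over numBatches batched requests.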
+ var model = new MultilingualE5SmallModel( + "foo", + TaskType.TEXT_EMBEDDING, + "e5", + new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform", null), + new WordBoundaryChunkingSettings(wordsPerChunk, 0) + ); + + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + + // For the given input we know how many requests will be made + service.chunkedInfer( + model, + null, + List.of(input), + Map.of(), + InputType.SEARCH, + new ChunkingOptions(null, null), + InferenceAction.Request.DEFAULT_TIMEOUT, + latchedListener + ); + + latch.await(); + assertTrue("Listener not called with results", gotResults.get()); + } + public void testParsePersistedConfig_Rerank() { // with task settings { From 6303de34e44e32372f9d3d4be68bf6d243d9b110 Mon Sep 17 00:00:00 2001 From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:58:04 +0100 Subject: [PATCH 51/95] Fix NPE in MlMemoryAutoscalingDecider (#116650) * Fix NPE in MlMemoryAutoscalingDecider * Update docs/changelog/116650.yaml * Update 116650.yaml * Update docs/changelog/116650.yaml * better fix --- docs/changelog/116650.yaml | 5 +++++ .../xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java | 6 +++++- .../xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/116650.yaml diff --git a/docs/changelog/116650.yaml b/docs/changelog/116650.yaml new file mode 100644 index 0000000000000..d314a918aede9 --- /dev/null +++ b/docs/changelog/116650.yaml @@ -0,0 +1,5 @@ +pr: 116650 +summary: Fix bug in ML autoscaling when some node info is unavailable +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java index bab7bb52f928f..5a06308a3c8cc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java @@ -17,7 +17,11 @@ public static Builder builder(ByteSizeValue nodeSize, ByteSizeValue tierSize) { } public static Builder from(AutoscalingCapacity autoscalingCapacity) { - return builder(autoscalingCapacity.node().memory(), autoscalingCapacity.total().memory()); + if (autoscalingCapacity == null) { + return builder(null, null); + } else { + return builder(autoscalingCapacity.node().memory(), autoscalingCapacity.total().memory()); + } } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java index dfe0e557f749d..0ff6aece95ab1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java @@ -809,7 +809,7 @@ static MlMemoryAutoscalingCapacity ensureScaleDown( MlMemoryAutoscalingCapacity scaleDownResult, MlMemoryAutoscalingCapacity currentCapacity ) { - if (scaleDownResult == null || currentCapacity == null) { + if (scaleDownResult == null || currentCapacity == null || currentCapacity.isUndetermined()) { return null; } MlMemoryAutoscalingCapacity newCapacity = 
MlMemoryAutoscalingCapacity.builder( From 85b2bab2e38409449a37adc9408902cbf79f8c8f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Tue, 12 Nov 2024 12:17:04 +0000 Subject: [PATCH 52/95] Bump versions after 8.15.4 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 6 +++--- .buildkite/pipelines/periodic.yml | 10 +++++----- .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 2 +- server/src/main/java/org/elasticsearch/Version.java | 1 + .../resources/org/elasticsearch/TransportVersions.csv | 1 + .../org/elasticsearch/index/IndexVersions.csv | 1 + 8 files changed, 14 insertions(+), 11 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 37ea49e3a6d95..167830d3ed8b3 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 788960c76e150..0f2e70addd684 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -272,8 +272,8 @@ steps: env: BWC_VERSION: 8.14.3 - - label: "{{matrix.image}} / 8.15.4 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.4 + - label: "{{matrix.image}} / 8.15.5 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.5 timeout_in_minutes: 300 matrix: setup: @@ -286,7 +286,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.15.4 + BWC_VERSION: 8.15.5 - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 7b6a6ea72fe83..f68f64332426c 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -287,8 +287,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.15.4 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.4#bwcTest + - label: 8.15.5 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.5#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -297,7 +297,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.15.4 + BWC_VERSION: 8.15.5 retry: automatic: - exit_status: "-1" @@ -429,7 +429,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -471,7 +471,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 2e77631450825..b4a4460ff5a80 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -14,7 +14,7 @@ BWC_VERSION: - "8.12.2" - "8.13.4" - "8.14.3" - - "8.15.4" + - "8.15.5" - "8.16.0" - "8.17.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 
c6edc709a8ceb..7dad55b653925 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "8.15.4" + - "8.15.5" - "8.16.0" - "8.17.0" - "9.0.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 5e4df05c10182..909d733fd3719 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -187,6 +187,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version V_8_15_3 = new Version(8_15_03_99); public static final Version V_8_15_4 = new Version(8_15_04_99); + public static final Version V_8_15_5 = new Version(8_15_05_99); public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_17_0 = new Version(8_17_00_99); public static final Version V_9_0_0 = new Version(9_00_00_99); diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index b0ef5b780e775..26c518962c19a 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -131,3 +131,4 @@ 8.15.1,8702002 8.15.2,8702003 8.15.3,8702003 +8.15.4,8702003 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index e3681cc975988..6cab0b513ee63 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -131,3 +131,4 @@ 8.15.1,8512000 8.15.2,8512000 8.15.3,8512000 +8.15.4,8512000 From 6c85934c18e22eca5109e66a66174cae339fa040 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine Date: Tue, 12 Nov 2024 12:18:50 +0000 Subject: [PATCH 53/95] Prune changelogs after 8.15.4 release --- docs/changelog/112250.yaml | 5 ----- docs/changelog/113723.yaml | 6 ------ docs/changelog/114407.yaml | 6 ------ docs/changelog/114533.yaml | 5 ----- docs/changelog/114601.yaml | 6 ------ docs/changelog/115181.yaml | 5 ----- docs/changelog/115308.yaml | 6 ------ docs/changelog/115430.yaml | 5 ----- docs/changelog/115459.yaml | 5 ----- docs/changelog/115510.yaml | 6 ------ docs/changelog/115834.yaml | 5 ----- docs/changelog/116031.yaml | 6 ------ docs/changelog/116219.yaml | 6 ------ 13 files changed, 72 deletions(-) delete mode 100644 docs/changelog/112250.yaml delete mode 100644 docs/changelog/113723.yaml delete mode 100644 docs/changelog/114407.yaml delete mode 100644 docs/changelog/114533.yaml delete mode 100644 docs/changelog/114601.yaml delete mode 100644 docs/changelog/115181.yaml delete mode 100644 docs/changelog/115308.yaml delete mode 100644 docs/changelog/115430.yaml delete mode 100644 docs/changelog/115459.yaml delete mode 100644 docs/changelog/115510.yaml delete mode 100644 docs/changelog/115834.yaml delete mode 100644 docs/changelog/116031.yaml delete mode 100644 docs/changelog/116219.yaml diff --git a/docs/changelog/112250.yaml b/docs/changelog/112250.yaml deleted file mode 100644 index edbb5667d4b9d..0000000000000 --- a/docs/changelog/112250.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112250 -summary: Do not exclude empty arrays or empty objects in source filtering -area: Search -type: bug -issues: [109668] diff --git a/docs/changelog/113723.yaml b/docs/changelog/113723.yaml deleted file 
mode 100644 index 2cbcf49102719..0000000000000 --- a/docs/changelog/113723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113723 -summary: Fix max file size check to use `getMaxFileSize` -area: Infra/Core -type: bug -issues: - - 113705 diff --git a/docs/changelog/114407.yaml b/docs/changelog/114407.yaml deleted file mode 100644 index 4c1134a9d3834..0000000000000 --- a/docs/changelog/114407.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114407 -summary: Fix synthetic source handling for `bit` type in `dense_vector` field -area: Search -type: bug -issues: - - 114402 diff --git a/docs/changelog/114533.yaml b/docs/changelog/114533.yaml deleted file mode 100644 index f45589e8de921..0000000000000 --- a/docs/changelog/114533.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114533 -summary: Fix dim validation for bit `element_type` -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/114601.yaml b/docs/changelog/114601.yaml deleted file mode 100644 index d2f563d62a639..0000000000000 --- a/docs/changelog/114601.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114601 -summary: Support semantic_text in object fields -area: Vector Search -type: bug -issues: - - 114401 diff --git a/docs/changelog/115181.yaml b/docs/changelog/115181.yaml deleted file mode 100644 index 65f59d5ed0add..0000000000000 --- a/docs/changelog/115181.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115181 -summary: Always check the parent breaker with zero bytes in `PreallocatedCircuitBreakerService` -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/115308.yaml b/docs/changelog/115308.yaml deleted file mode 100644 index 163f0232a3e58..0000000000000 --- a/docs/changelog/115308.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115308 -summary: "ESQL: Disable pushdown of WHERE past STATS" -area: ES|QL -type: bug -issues: - - 115281 diff --git a/docs/changelog/115430.yaml b/docs/changelog/115430.yaml deleted file mode 100644 index c2903f7751012..0000000000000 --- a/docs/changelog/115430.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115430 -summary: Prevent NPE if model assignment is removed while waiting to start -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115459.yaml b/docs/changelog/115459.yaml deleted file mode 100644 index b20a8f765c084..0000000000000 --- a/docs/changelog/115459.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115459 -summary: Guard blob store local directory creation with `doPrivileged` -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/115510.yaml b/docs/changelog/115510.yaml deleted file mode 100644 index 1e71270e18f97..0000000000000 --- a/docs/changelog/115510.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115510 -summary: Fix lingering license warning header in IP filter -area: License -type: bug -issues: - - 114865 diff --git a/docs/changelog/115834.yaml b/docs/changelog/115834.yaml deleted file mode 100644 index 91f9e9a4e2e41..0000000000000 --- a/docs/changelog/115834.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115834 -summary: Try to simplify geometries that fail with `TopologyException` -area: Geo -type: bug -issues: [] diff --git a/docs/changelog/116031.yaml b/docs/changelog/116031.yaml deleted file mode 100644 index e30552bf3b513..0000000000000 --- a/docs/changelog/116031.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116031 -summary: Resolve pipelines from template on lazy rollover write -area: Data streams -type: bug -issues: - - 112781 diff --git a/docs/changelog/116219.yaml b/docs/changelog/116219.yaml deleted file mode 100644 index aeeea68570e77..0000000000000 --- a/docs/changelog/116219.yaml 
+++ /dev/null @@ -1,6 +0,0 @@ -pr: 116219 -summary: "[apm-data] Apply lazy rollover on index template creation" -area: Data streams -type: bug -issues: - - 116230 From 098c8dad900bd46bfdac0156b5c10a173929e175 Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Tue, 12 Nov 2024 15:27:58 +0100 Subject: [PATCH 54/95] [Docs] Fix sharepoint docs for 8.16 release (#116661) --- .../docs/_connectors-overview-table.asciidoc | 2 +- .../connector/docs/connectors-sharepoint.asciidoc | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/reference/connector/docs/_connectors-overview-table.asciidoc b/docs/reference/connector/docs/_connectors-overview-table.asciidoc index f25ea3deceeee..f5f8103349dde 100644 --- a/docs/reference/connector/docs/_connectors-overview-table.asciidoc +++ b/docs/reference/connector/docs/_connectors-overview-table.asciidoc @@ -44,7 +44,7 @@ NOTE: All connectors are available as self-managed <>|*GA*|8.12+|8.12+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/salesforce.py[View code] |<>|*GA*|8.10+|8.10+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/servicenow.py[View code] |<>|*GA*|8.9+|8.9+|8.9+|8.9+|8.9+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_online.py[View code] -|<>|*Beta*|8.15+|-|8.11+|8.13+|8.14+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] +|<>|*Beta*|8.15+|-|8.11+|8.13+|8.15+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] |<>|*Preview*|8.14+|-|-|-|-|https://github.com/elastic/connectors/tree/main/connectors/sources/slack.py[View code] |<>|*Preview*|8.14+|-|-|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/teams.py[View code] |<>|*Preview*|8.14+|-|8.11+|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/zoom.py[View code] diff --git a/docs/reference/connector/docs/connectors-sharepoint.asciidoc b/docs/reference/connector/docs/connectors-sharepoint.asciidoc index f5590daa1e701..d7a2307a9db80 100644 --- a/docs/reference/connector/docs/connectors-sharepoint.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint.asciidoc @@ -67,6 +67,9 @@ The following SharePoint Server versions are compatible: The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -133,7 +136,7 @@ The connector syncs the following SharePoint object types: [NOTE] ==== * Content from files bigger than 10 MB won't be extracted by default. Use the <> to handle larger binary files. -* Permissions are not synced. **All documents** indexed to an Elastic deployment will be visible to **all users with access** to that Elasticsearch Index. +* Permissions are not synced by default. Enable <> to sync permissions. ==== [discrete#es-connectors-sharepoint-sync-types] @@ -191,7 +194,7 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== @@ -254,6 +257,9 @@ Once connected, you'll be able to update these values in Kibana. 
The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -408,5 +414,5 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== From ade29fb8f444731a81cbf2dff2ba6c93206fccdc Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 12 Nov 2024 15:50:50 +0100 Subject: [PATCH 55/95] Deduplicate DocValueFormat objects from InternalAggregation when deserializing (#116640) --- .../elasticsearch/search/DocValueFormat.java | 21 +++++++++++++++++-- .../elasticsearch/search/SearchModule.java | 4 ++-- .../search/DocValueFormatTests.java | 4 ++-- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 51f52326907eb..a1e8eb25f4780 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -12,6 +12,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.DelayableWriteable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -260,7 +261,7 @@ private DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resol this.formatSortValues = formatSortValues; } - public DateTime(StreamInput in) throws IOException { + private DateTime(StreamInput in) throws IOException { String formatterPattern = in.readString(); Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES) ? 
LocaleUtils.parse(in.readString()) @@ -285,6 +286,14 @@ public String getWriteableName() { return NAME; } + public static DateTime readFrom(StreamInput in) throws IOException { + final DateTime dateTime = new DateTime(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(dateTime); + } + return dateTime; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); @@ -528,7 +537,7 @@ public Decimal(String pattern) { this.format = new DecimalFormat(pattern, SYMBOLS); } - public Decimal(StreamInput in) throws IOException { + private Decimal(StreamInput in) throws IOException { this(in.readString()); } @@ -537,6 +546,14 @@ public String getWriteableName() { return NAME; } + public static Decimal readFrom(StreamInput in) throws IOException { + final Decimal decimal = new Decimal(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(decimal); + } + return decimal; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(pattern); diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index fd39a95bdb75d..7a8b4e0cfe95a 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -1013,8 +1013,8 @@ private void registerScoreFunction(ScoreFunctionSpec scoreFunction) { private void registerValueFormats() { registerValueFormat(DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN); - registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new); - registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new); + registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom); + registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom); registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH); registerValueFormat(DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE); registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP); diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index e81066a731d2e..7c9a68cbc91f1 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -43,8 +43,8 @@ public class DocValueFormatTests extends ESTestCase { public void testSerialization() throws Exception { List entries = new ArrayList<>(); entries.add(new Entry(DocValueFormat.class, DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE)); 
entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP)); From 94ab1a6fa7638f8905538028a330b2901c01b8b0 Mon Sep 17 00:00:00 2001 From: Pawan Kartik Date: Tue, 12 Nov 2024 15:10:33 +0000 Subject: [PATCH 56/95] Add tests for RCS1:ES|QL to verify behaviour for disconnected clusters (#116449) * Add tests for RCS1:ES|QL to verify behaviour for disconnected clusters * fix: build * Add missing assertions for ccs metadata * Address review comments --- ...ssClusterEsqlRCS1UnavailableRemotesIT.java | 286 ++++++++++++++++++ 1 file changed, 286 insertions(+) create mode 100644 x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java new file mode 100644 index 0000000000000..b6fc43e2a6e48 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java @@ -0,0 +1,286 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.greaterThan; + +public class CrossClusterEsqlRCS1UnavailableRemotesIT extends AbstractRemoteClusterSecurityTestCase { + private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean(); + + static { + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .nodes(1) + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("remote_cluster.port", "0") + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true")) + .build(); + + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") 
+ .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .build(); + } + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + @Before + public void setupPreRequisites() throws IOException { + setupRolesAndPrivileges(); + loadData(); + } + + public void testEsqlRcs1UnavailableRemoteScenarios() throws Exception { + clusterShutDownWithRandomSkipUnavailable(); + remoteClusterShutdownWithSkipUnavailableTrue(); + remoteClusterShutdownWithSkipUnavailableFalse(); + } + + private void clusterShutDownWithRandomSkipUnavailable() throws Exception { + // skip_unavailable is set to a random boolean value. + // However, no clusters are stopped. Hence, we do not expect any other behaviour + // other than a 200-OK. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), randomBoolean()); + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + assertThat(columns.size(), is(4)); + assertThat(values.size(), is(9)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(2)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("skipped"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("successful")); + } + + @SuppressWarnings("unchecked") + private void remoteClusterShutdownWithSkipUnavailableTrue() throws Exception { + // Remote cluster is stopped and skip unavailable is set to true. + // We expect no exception and partial results from the remaining open cluster. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), true); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + // Assert results obtained from the local cluster and that remote cluster was + // skipped. 
+ assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + + assertThat(columns.size(), is(2)); + assertThat(values.size(), is(5)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(1)); + assertThat((int) clusters.get("skipped"), is(1)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("skipped")); + + } catch (ResponseException r) { + throw new AssertionError(r); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void remoteClusterShutdownWithSkipUnavailableFalse() throws Exception { + // Remote cluster is stopped and skip_unavailable is set to false. + // Although the other cluster is open, we expect an Exception. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), false); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + ResponseException ex = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(query))); + assertThat(ex.getMessage(), containsString("connect_transport_exception")); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void setupRolesAndPrivileges() throws IOException { + var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + + var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleOnRemoteClusterRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"] + } + ], + "remote_indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"], + "clusters": ["my_remote_cluster"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleOnRemoteClusterRequest)); + } + + private void loadData() throws IOException { + Request createIndex = new Request("PUT", "points"); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "id": { "type": "integer" }, + "score": { "type": "integer" } + } + } + } + """); + assertOK(client().performRequest(createIndex)); + + Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": { "_index": "points" } } + { "id": 1, "score": 75} + { "index": { "_index": "points" } } + { "id": 2, "score": 125} + { "index": { "_index": "points" } } + { "id": 3, "score": 100} + { "index": { "_index": "points" } } + { "id": 4, "score": 50} + { "index": { "_index": "points" } } + { "id": 5, "score": 150} + """); + assertOK(client().performRequest(bulkRequest)); + + createIndex = new Request("PUT", "squares"); + createIndex.setJsonEntity(""" + { + "mappings": { + 
"properties": { + "num": { "type": "integer" }, + "square": { "type": "integer" } + } + } + } + """); + assertOK(performRequestAgainstFulfillingCluster(createIndex)); + + bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": {"_index": "squares"}} + { "num": 1, "square": 1 } + { "index": {"_index": "squares"}} + { "num": 2, "square": 4 } + { "index": {"_index": "squares"}} + { "num": 3, "square": 9 } + { "index": {"_index": "squares"}} + { "num": 4, "square": 16 } + """); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + + private Request esqlRequest(String query) throws IOException { + XContentBuilder body = JsonXContent.contentBuilder(); + + body.startObject(); + body.field("query", query); + body.field("include_ccs_metadata", true); + body.endObject(); + + Request request = new Request("POST", "_query"); + request.setJsonEntity(Strings.toString(body)); + + return request; + } +} From b7167b73e377f7d42f56646b18908eaa7069a79f Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Tue, 12 Nov 2024 09:13:37 -0600 Subject: [PATCH 57/95] Docs for monitor_stats privilege (#116533) This commit adds docs for monitor_stats and updates an example snippet to include both remote_indices and remote_cluster. --- .../security/bulk-create-roles.asciidoc | 4 +++- .../rest-api/security/create-roles.asciidoc | 22 ++++++++++++++----- .../authorization/managing-roles.asciidoc | 8 +++---- .../authorization/privileges.asciidoc | 5 +++++ 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index 560e8b74cdd2c..37f49f2445770 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -102,7 +102,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>. diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index a1ab892330e67..d23b9f06e2d87 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -105,7 +105,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>. @@ -176,21 +178,29 @@ POST /_security/role/cli_or_drivers_minimal -------------------------------------------------- // end::sql-queries-permission[] -The following example configures a role with remote indices privileges on a remote cluster: +The following example configures a role with remote indices and remote cluster privileges for a remote cluster: [source,console] -------------------------------------------------- -POST /_security/role/role_with_remote_indices +POST /_security/role/only_remote_access_role { "remote_indices": [ { - "clusters": [ "my_remote" ], <1> + "clusters": ["my_remote"], <1> "names": ["logs*"], <2> "privileges": ["read", "read_cross_cluster", "view_index_metadata"] <3> } + ], + "remote_cluster": [ + { + "clusters": ["my_remote"], <1> + "privileges": ["monitor_stats"] <4> + } ] } -------------------------------------------------- -<1> The remote indices privileges apply to remote cluster with the alias `my_remote`. 
-<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster ( `my_remote`). +<1> The remote indices and remote cluster privileges apply to the remote cluster with the alias `my_remote`. +<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster (`my_remote`). <3> The actual <> granted for `logs*` on `my_remote`. +<4> The actual <> granted for `my_remote`. +Note - only a subset of the cluster privileges is supported for remote clusters. diff --git a/docs/reference/security/authorization/managing-roles.asciidoc b/docs/reference/security/authorization/managing-roles.asciidoc index 535d70cbc5e9c..0c3f520605f07 100644 --- a/docs/reference/security/authorization/managing-roles.asciidoc +++ b/docs/reference/security/authorization/managing-roles.asciidoc @@ -249,12 +249,10 @@ The following describes the structure of a remote cluster permissions entry: <> and <>. This field is required. <2> The cluster level privileges for the remote cluster. The allowed values here are a subset of the -<>. This field is required. +<>. +The <> can be used to determine +which privileges are allowed here. This field is required. -The `monitor_enrich` privilege for remote clusters was introduced in version -8.15.0. Currently, this is the only privilege available for remote clusters and -is required to enable users to use the `ENRICH` keyword in ES|QL queries across -clusters. ==== Example diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index 747b1eef40441..3b69e5c1ba984 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -250,6 +250,11 @@ Privileges to list and view details on existing repositories and snapshots. + This privilege is not available in {serverless-full}. +`monitor_stats`:: +Privileges to list and view details of stats. ++ +This privilege is not available in {serverless-full}. + `monitor_text_structure`:: All read-only operations related to the <>. + From a71c132481217d2a803cc493da903d14076c9e60 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:14:02 +0100 Subject: [PATCH 58/95] [DOCS] Update sharepoint-online connector perms (#116641) --- .../docs/connectors-sharepoint-online.asciidoc | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 95ff8223b4d20..21d0890e436c5 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -87,14 +87,16 @@ Select an expiration date. (At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform.
@@ -138,7 +140,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions @@ -546,14 +548,16 @@ Select an expiration date. (At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform. @@ -597,7 +601,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions From 7039a1dc8c886e23fda47a4b38cbab72746ac8cf Mon Sep 17 00:00:00 2001 From: Ying Mao Date: Tue, 12 Nov 2024 10:26:13 -0500 Subject: [PATCH 59/95] Adds support for `input_type` field to Vertex inference service (#116431) * Adding input type to google vertex ai service * Update docs/changelog/116431.yaml * PR feedback - backwards compatibility * Fix lint error --- docs/changelog/116431.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../GoogleVertexAiActionCreator.java | 6 +- .../GoogleVertexAiActionVisitor.java | 3 +- .../GoogleVertexAiEmbeddingsRequest.java | 2 +- ...GoogleVertexAiEmbeddingsRequestEntity.java | 37 +++- .../googlevertexai/GoogleVertexAiModel.java | 18 +- .../googlevertexai/GoogleVertexAiService.java | 4 +- .../GoogleVertexAiEmbeddingsModel.java | 51 +++++- ...VertexAiEmbeddingsRequestTaskSettings.java | 27 ++- .../GoogleVertexAiEmbeddingsTaskSettings.java | 99 ++++++++-- .../rerank/GoogleVertexAiRerankModel.java | 9 +- ...eVertexAiEmbeddingsRequestEntityTests.java | 96 ++++++++-- .../GoogleVertexAiEmbeddingsRequestTests.java | 36 +++- .../GoogleVertexAiServiceTests.java | 90 +++++++--- .../GoogleVertexAiEmbeddingsModelTests.java | 104 ++++++++++- ...xAiEmbeddingsRequestTaskSettingsTests.java | 45 ++++- ...leVertexAiEmbeddingsTaskSettingsTests.java | 170 ++++++++++++++++-- 18 files changed, 697 insertions(+), 106 deletions(-) create mode 100644 docs/changelog/116431.yaml diff --git a/docs/changelog/116431.yaml b/docs/changelog/116431.yaml new file mode 100644 index 0000000000000..50c6baf1d01c7 --- /dev/null +++ b/docs/changelog/116431.yaml @@ -0,0 +1,5 @@ +pr: 116431 +summary: Adds support for `input_type` field to Vertex inference service +area: Machine Learning +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 
5f3b466f9f7bd..6e62845383a14 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -193,6 +193,7 @@ static TransportVersion def(int id) { public static final TransportVersion ROLE_MONITOR_STATS = def(8_787_00_0); public static final TransportVersion DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0); public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0); + public static final TransportVersion VERTEX_AI_INPUT_TYPE_ADDED = def(8_790_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java index 27b3ae95f1aa4..99f535f81485c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionCreator.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.action.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.SenderExecutableAction; import org.elasticsearch.xpack.inference.external.http.sender.GoogleVertexAiEmbeddingsRequestManager; @@ -33,9 +34,10 @@ public GoogleVertexAiActionCreator(Sender sender, ServiceComponents serviceCompo } @Override - public ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings) { + public ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings, InputType inputType) { + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, taskSettings, inputType); var requestManager = new GoogleVertexAiEmbeddingsRequestManager( - model, + overriddenModel, serviceComponents.truncator(), serviceComponents.threadPool() ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java index def8f09ce06be..2b5cd5854c8ab 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/googlevertexai/GoogleVertexAiActionVisitor.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.inference.external.action.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsModel; import org.elasticsearch.xpack.inference.services.googlevertexai.rerank.GoogleVertexAiRerankModel; @@ -15,7 +16,7 @@ public interface GoogleVertexAiActionVisitor { - ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings); + ExecutableAction create(GoogleVertexAiEmbeddingsModel model, Map taskSettings, InputType inputType); ExecutableAction create(GoogleVertexAiRerankModel model, Map taskSettings); } 
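For orientation before the remaining hunks of this patch, here is a minimal, self-contained sketch of the two pieces being threaded together: the precedence rule for choosing an effective `InputType` (an explicit request-level input type wins over the per-request `task_settings` map, which wins over the value stored on the model) and the mapping from Elasticsearch input types to Vertex AI `task_type` strings used when serializing the request body. This is not the plugin's code; the class name is hypothetical and `InputType` below is a local stand-in for `org.elasticsearch.inference.InputType`, restricted to the values this patch accepts.

```
// Standalone sketch of the input_type handling added in this patch.
import java.util.List;

public class VertexInputTypeSketch {

    // Stand-in for org.elasticsearch.inference.InputType, limited to the
    // values the patch allows (VALID_REQUEST_VALUES in the diff below).
    enum InputType { INGEST, SEARCH, CLASSIFICATION, CLUSTERING }

    // Mirrors GoogleVertexAiEmbeddingsRequestEntity#convertToString below:
    // each input type maps onto a Vertex AI task_type string.
    static String toVertexTaskType(InputType inputType) {
        return switch (inputType) {
            case INGEST -> "RETRIEVAL_DOCUMENT";
            case SEARCH -> "RETRIEVAL_QUERY";
            case CLASSIFICATION -> "CLASSIFICATION";
            case CLUSTERING -> "CLUSTERING";
        };
    }

    // Mirrors the precedence in GoogleVertexAiEmbeddingsTaskSettings#getValidInputType:
    // request-level input type first, then the per-request task settings,
    // then whatever is stored on the model. (The real code additionally
    // checks the request value against VALID_REQUEST_VALUES, not just null.)
    static InputType resolve(InputType stored, InputType fromRequestTaskSettings, InputType fromRequest) {
        if (fromRequest != null) {
            return fromRequest;
        }
        if (fromRequestTaskSettings != null) {
            return fromRequestTaskSettings;
        }
        return stored;
    }

    public static void main(String[] args) {
        // The model was configured for INGEST, but this request asks for SEARCH.
        InputType effective = resolve(InputType.INGEST, null, InputType.SEARCH);

        // Build the request-body shape asserted in
        // GoogleVertexAiEmbeddingsRequestEntityTests further down.
        StringBuilder body = new StringBuilder("{\"instances\":[");
        List<String> inputs = List.of("abc", "def");
        for (int i = 0; i < inputs.size(); i++) {
            if (i > 0) {
                body.append(',');
            }
            body.append("{\"content\":\"").append(inputs.get(i)).append('"');
            body.append(",\"task_type\":\"").append(toVertexTaskType(effective)).append('"');
            body.append('}');
        }
        body.append("],\"parameters\":{\"autoTruncate\":true}}");
        System.out.println(body); // task_type resolves to RETRIEVAL_QUERY
    }
}
```

Note also how the wire-format change in the hunks below is gated on a new transport version (`VERTEX_AI_INPUT_TYPE_ADDED`), so the optional enum is never written to or read from older nodes, which is the backwards-compatibility concern called out in the PR description.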
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java index c0e36baf2e98f..75320bc762c8b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequest.java @@ -40,7 +40,7 @@ public HttpRequest createHttpRequest() { HttpPost httpPost = new HttpPost(model.uri()); ByteArrayEntity byteEntity = new ByteArrayEntity( - Strings.toString(new GoogleVertexAiEmbeddingsRequestEntity(truncationResult.input(), model.getTaskSettings().autoTruncate())) + Strings.toString(new GoogleVertexAiEmbeddingsRequestEntity(truncationResult.input(), model.getTaskSettings())) .getBytes(StandardCharsets.UTF_8) ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java index 2fae999599ba2..fc33df0d63acd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntity.java @@ -7,23 +7,35 @@ package org.elasticsearch.xpack.inference.external.request.googlevertexai; -import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; import java.io.IOException; import java.util.List; import java.util.Objects; -public record GoogleVertexAiEmbeddingsRequestEntity(List inputs, @Nullable Boolean autoTruncation) implements ToXContentObject { +import static org.elasticsearch.xpack.inference.services.cohere.embeddings.CohereEmbeddingsTaskSettings.invalidInputTypeMessage; + +public record GoogleVertexAiEmbeddingsRequestEntity(List inputs, GoogleVertexAiEmbeddingsTaskSettings taskSettings) + implements + ToXContentObject { private static final String INSTANCES_FIELD = "instances"; private static final String CONTENT_FIELD = "content"; private static final String PARAMETERS_FIELD = "parameters"; private static final String AUTO_TRUNCATE_FIELD = "autoTruncate"; + private static final String TASK_TYPE_FIELD = "task_type"; + + private static final String CLASSIFICATION_TASK_TYPE = "CLASSIFICATION"; + private static final String CLUSTERING_TASK_TYPE = "CLUSTERING"; + private static final String RETRIEVAL_DOCUMENT_TASK_TYPE = "RETRIEVAL_DOCUMENT"; + private static final String RETRIEVAL_QUERY_TASK_TYPE = "RETRIEVAL_QUERY"; public GoogleVertexAiEmbeddingsRequestEntity { Objects.requireNonNull(inputs); + Objects.requireNonNull(taskSettings); } @Override @@ -35,16 +47,20 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); { builder.field(CONTENT_FIELD, input); + + if (taskSettings.getInputType() != 
null) { + builder.field(TASK_TYPE_FIELD, convertToString(taskSettings.getInputType())); + } } builder.endObject(); } builder.endArray(); - if (autoTruncation != null) { + if (taskSettings.autoTruncate() != null) { builder.startObject(PARAMETERS_FIELD); { - builder.field(AUTO_TRUNCATE_FIELD, autoTruncation); + builder.field(AUTO_TRUNCATE_FIELD, taskSettings.autoTruncate()); } builder.endObject(); } @@ -52,4 +68,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + + static String convertToString(InputType inputType) { + return switch (inputType) { + case INGEST -> RETRIEVAL_DOCUMENT_TASK_TYPE; + case SEARCH -> RETRIEVAL_QUERY_TASK_TYPE; + case CLASSIFICATION -> CLASSIFICATION_TASK_TYPE; + case CLUSTERING -> CLUSTERING_TASK_TYPE; + default -> { + assert false : invalidInputTypeMessage(inputType); + yield null; + } + }; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java index 17e6ec2152e7e..caa244f8af4f2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiModel.java @@ -7,13 +7,16 @@ package org.elasticsearch.xpack.inference.services.googlevertexai; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.ServiceSettings; +import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; +import java.net.URI; import java.util.Map; import java.util.Objects; @@ -21,6 +24,8 @@ public abstract class GoogleVertexAiModel extends Model { private final GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings; + protected URI uri; + public GoogleVertexAiModel( ModelConfigurations configurations, ModelSecrets secrets, @@ -34,13 +39,24 @@ public GoogleVertexAiModel( public GoogleVertexAiModel(GoogleVertexAiModel model, ServiceSettings serviceSettings) { super(model, serviceSettings); + uri = model.uri(); + rateLimitServiceSettings = model.rateLimitServiceSettings(); + } + + public GoogleVertexAiModel(GoogleVertexAiModel model, TaskSettings taskSettings) { + super(model, taskSettings); + + uri = model.uri(); rateLimitServiceSettings = model.rateLimitServiceSettings(); } - public abstract ExecutableAction accept(GoogleVertexAiActionVisitor creator, Map taskSettings); + public abstract ExecutableAction accept(GoogleVertexAiActionVisitor creator, Map taskSettings, InputType inputType); public GoogleVertexAiRateLimitServiceSettings rateLimitServiceSettings() { return rateLimitServiceSettings; } + public URI uri() { + return uri; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java index 0b4da10e7130f..a05b1a937d376 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiService.java @@ -210,7 +210,7 @@ protected void doInfer( var actionCreator = new GoogleVertexAiActionCreator(getSender(), getServiceComponents()); - var action = googleVertexAiModel.accept(actionCreator, taskSettings); + var action = googleVertexAiModel.accept(actionCreator, taskSettings, inputType); action.execute(inputs, timeout, listener); } @@ -235,7 +235,7 @@ protected void doChunkedInfer( ).batchRequestsWithListeners(listener); for (var request : batchedRequests) { - var action = googleVertexAiModel.accept(actionCreator, taskSettings); + var action = googleVertexAiModel.accept(actionCreator, taskSettings, inputType); action.execute(new DocumentsOnlyInput(request.batch().inputs()), timeout, request.listener()); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java index 1df8ee937497a..a5acbb80b76ec 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModel.java @@ -11,12 +11,14 @@ import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SettingsConfiguration; import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.configuration.SettingsConfigurationDisplayType; import org.elasticsearch.inference.configuration.SettingsConfigurationFieldType; +import org.elasticsearch.inference.configuration.SettingsConfigurationSelectOption; import org.elasticsearch.xpack.inference.external.action.ExecutableAction; import org.elasticsearch.xpack.inference.external.action.googlevertexai.GoogleVertexAiActionVisitor; import org.elasticsearch.xpack.inference.external.request.googlevertexai.GoogleVertexAiUtils; @@ -29,13 +31,25 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; public class GoogleVertexAiEmbeddingsModel extends GoogleVertexAiModel { - private URI uri; + public static GoogleVertexAiEmbeddingsModel of( + GoogleVertexAiEmbeddingsModel model, + Map taskSettings, + InputType inputType + ) { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(taskSettings); + return new GoogleVertexAiEmbeddingsModel( + model, + GoogleVertexAiEmbeddingsTaskSettings.of(model.getTaskSettings(), requestTaskSettings, inputType) + ); + } public GoogleVertexAiEmbeddingsModel( String inferenceEntityId, @@ 
-62,6 +76,10 @@ public GoogleVertexAiEmbeddingsModel(GoogleVertexAiEmbeddingsModel model, Google super(model, serviceSettings); } + public GoogleVertexAiEmbeddingsModel(GoogleVertexAiEmbeddingsModel model, GoogleVertexAiEmbeddingsTaskSettings taskSettings) { + super(model, taskSettings); + } + // Should only be used directly for testing GoogleVertexAiEmbeddingsModel( String inferenceEntityId, @@ -126,13 +144,9 @@ public GoogleVertexAiEmbeddingsRateLimitServiceSettings rateLimitServiceSettings return (GoogleVertexAiEmbeddingsRateLimitServiceSettings) super.rateLimitServiceSettings(); } - public URI uri() { - return uri; - } - @Override - public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { - return visitor.create(this, taskSettings); + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings, InputType inputType) { + return visitor.create(this, taskSettings, inputType); } public static URI buildUri(String location, String projectId, String modelId) throws URISyntaxException { @@ -161,11 +175,32 @@ public static Map get() { new LazyInitializable<>(() -> { var configurationMap = new HashMap(); + configurationMap.put( + INPUT_TYPE, + new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.DROPDOWN) + .setLabel("Input Type") + .setOrder(1) + .setRequired(false) + .setSensitive(false) + .setTooltip("Specifies the type of input passed to the model.") + .setType(SettingsConfigurationFieldType.STRING) + .setOptions( + Stream.of( + InputType.CLASSIFICATION.toString(), + InputType.CLUSTERING.toString(), + InputType.INGEST.toString(), + InputType.SEARCH.toString() + ).map(v -> new SettingsConfigurationSelectOption.Builder().setLabelAndValue(v).build()).toList() + ) + .setValue("") + .build() + ); + configurationMap.put( AUTO_TRUNCATE, new SettingsConfiguration.Builder().setDisplay(SettingsConfigurationDisplayType.TOGGLE) .setLabel("Auto Truncate") - .setOrder(1) + .setOrder(2) .setRequired(false) .setSensitive(false) .setTooltip("Specifies if the API truncates inputs longer than the maximum token length automatically.") diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java index 14a67a64377e2..e39c423582151 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettings.java @@ -9,29 +9,46 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.ModelConfigurations; import java.util.Map; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.VALID_REQUEST_VALUES; -public record 
GoogleVertexAiEmbeddingsRequestTaskSettings(@Nullable Boolean autoTruncate) { +public record GoogleVertexAiEmbeddingsRequestTaskSettings(@Nullable Boolean autoTruncate, @Nullable InputType inputType) { - public static final GoogleVertexAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + public static final GoogleVertexAiEmbeddingsRequestTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsRequestTaskSettings( + null, + null + ); public static GoogleVertexAiEmbeddingsRequestTaskSettings fromMap(Map map) { - if (map.isEmpty()) { - return GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS; + if (map == null || map.isEmpty()) { + return EMPTY_SETTINGS; } ValidationException validationException = new ValidationException(); + InputType inputType = extractOptionalEnum( + map, + INPUT_TYPE, + ModelConfigurations.TASK_SETTINGS, + InputType::fromString, + VALID_REQUEST_VALUES, + validationException + ); + Boolean autoTruncate = extractOptionalBoolean(map, GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate, inputType); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java index dcdbbda33575f..9b759a4661bce 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettings.java @@ -9,19 +9,24 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; +import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.EnumSet; import java.util.HashMap; import java.util.Map; import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; public class GoogleVertexAiEmbeddingsTaskSettings implements TaskSettings { @@ -29,48 +34,108 @@ public class GoogleVertexAiEmbeddingsTaskSettings implements TaskSettings { public static final String AUTO_TRUNCATE = "auto_truncate"; - public static final GoogleVertexAiEmbeddingsTaskSettings EMPTY_SETTINGS = new GoogleVertexAiEmbeddingsTaskSettings( - Boolean.valueOf(null) + public static final String INPUT_TYPE = "input_type"; + + static final EnumSet VALID_REQUEST_VALUES = EnumSet.of( + InputType.INGEST, + InputType.SEARCH, + InputType.CLASSIFICATION, + InputType.CLUSTERING ); + public static final GoogleVertexAiEmbeddingsTaskSettings EMPTY_SETTINGS = new 
GoogleVertexAiEmbeddingsTaskSettings(null, null); + public static GoogleVertexAiEmbeddingsTaskSettings fromMap(Map map) { + if (map == null || map.isEmpty()) { + return EMPTY_SETTINGS; + } + ValidationException validationException = new ValidationException(); + InputType inputType = extractOptionalEnum( + map, + INPUT_TYPE, + ModelConfigurations.TASK_SETTINGS, + InputType::fromString, + VALID_REQUEST_VALUES, + validationException + ); + Boolean autoTruncate = extractOptionalBoolean(map, AUTO_TRUNCATE, validationException); if (validationException.validationErrors().isEmpty() == false) { throw validationException; } - return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType); } public static GoogleVertexAiEmbeddingsTaskSettings of( GoogleVertexAiEmbeddingsTaskSettings originalSettings, - GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings + GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings, + InputType requestInputType ) { + var inputTypeToUse = getValidInputType(originalSettings, requestSettings, requestInputType); var autoTruncate = requestSettings.autoTruncate() == null ? originalSettings.autoTruncate : requestSettings.autoTruncate(); - return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputTypeToUse); + } + + private static InputType getValidInputType( + GoogleVertexAiEmbeddingsTaskSettings originalSettings, + GoogleVertexAiEmbeddingsRequestTaskSettings requestTaskSettings, + InputType requestInputType + ) { + InputType inputTypeToUse = originalSettings.inputType; + + if (VALID_REQUEST_VALUES.contains(requestInputType)) { + inputTypeToUse = requestInputType; + } else if (requestTaskSettings.inputType() != null) { + inputTypeToUse = requestTaskSettings.inputType(); + } + + return inputTypeToUse; } + private final InputType inputType; private final Boolean autoTruncate; - public GoogleVertexAiEmbeddingsTaskSettings(@Nullable Boolean autoTruncate) { + public GoogleVertexAiEmbeddingsTaskSettings(@Nullable Boolean autoTruncate, @Nullable InputType inputType) { + validateInputType(inputType); + this.inputType = inputType; this.autoTruncate = autoTruncate; } public GoogleVertexAiEmbeddingsTaskSettings(StreamInput in) throws IOException { this.autoTruncate = in.readOptionalBoolean(); + + var inputType = (in.getTransportVersion().onOrAfter(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) + ? 
in.readOptionalEnum(InputType.class) + : null; + + validateInputType(inputType); + this.inputType = inputType; + } + + private static void validateInputType(InputType inputType) { + if (inputType == null) { + return; + } + + assert VALID_REQUEST_VALUES.contains(inputType) : invalidInputTypeMessage(inputType); } @Override public boolean isEmpty() { - return autoTruncate == null; + return inputType == null && autoTruncate == null; } public Boolean autoTruncate() { return autoTruncate; } + public InputType getInputType() { + return inputType; + } + @Override public String getWriteableName() { return NAME; @@ -84,11 +149,19 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(this.autoTruncate); + + if (out.getTransportVersion().onOrAfter(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) { + out.writeOptionalEnum(this.inputType); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + if (inputType != null) { + builder.field(INPUT_TYPE, inputType); + } + if (autoTruncate != null) { builder.field(AUTO_TRUNCATE, autoTruncate); } @@ -101,19 +174,23 @@ public boolean equals(Object object) { if (this == object) return true; if (object == null || getClass() != object.getClass()) return false; GoogleVertexAiEmbeddingsTaskSettings that = (GoogleVertexAiEmbeddingsTaskSettings) object; - return Objects.equals(autoTruncate, that.autoTruncate); + return Objects.equals(inputType, that.inputType) && Objects.equals(autoTruncate, that.autoTruncate); } @Override public int hashCode() { - return Objects.hash(autoTruncate); + return Objects.hash(autoTruncate, inputType); + } + + public static String invalidInputTypeMessage(InputType inputType) { + return Strings.format("received invalid input type value [%s]", inputType.toString()); } @Override public TaskSettings updatedTaskSettings(Map newSettings) { - GoogleVertexAiEmbeddingsRequestTaskSettings requestSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + GoogleVertexAiEmbeddingsRequestTaskSettings updatedSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( new HashMap<>(newSettings) ); - return of(this, requestSettings); + return of(this, updatedSettings, updatedSettings.inputType() != null ? 
updatedSettings.inputType() : this.inputType); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java index 3f9c4f7a66560..e73d8d2e2613a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/rerank/GoogleVertexAiRerankModel.java @@ -10,6 +10,7 @@ import org.apache.http.client.utils.URIBuilder; import org.elasticsearch.common.util.LazyInitializable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.ModelSecrets; import org.elasticsearch.inference.SettingsConfiguration; @@ -34,8 +35,6 @@ public class GoogleVertexAiRerankModel extends GoogleVertexAiModel { - private URI uri; - public GoogleVertexAiRerankModel( String inferenceEntityId, TaskType taskType, @@ -122,12 +121,8 @@ public GoogleDiscoveryEngineRateLimitServiceSettings rateLimitServiceSettings() return (GoogleDiscoveryEngineRateLimitServiceSettings) super.rateLimitServiceSettings(); } - public URI uri() { - return uri; - } - @Override - public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings) { + public ExecutableAction accept(GoogleVertexAiActionVisitor visitor, Map taskSettings, InputType inputType) { return visitor.create(this, taskSettings); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java index f4912e0862e60..18ae7425aaaf2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestEntityTests.java @@ -8,10 +8,12 @@ package org.elasticsearch.xpack.inference.external.request.googlevertexai; import org.elasticsearch.common.Strings; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings; import java.io.IOException; import java.util.List; @@ -20,8 +22,11 @@ public class GoogleVertexAiEmbeddingsRequestEntityTests extends ESTestCase { - public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), true); + public void testToXContent_SingleEmbeddingRequest_WritesAllFields() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc"), + new GoogleVertexAiEmbeddingsTaskSettings(true, InputType.SEARCH) + ); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ 
-31,7 +36,8 @@ public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined( { "instances": [ { - "content": "abc" + "content": "abc", + "task_type": "RETRIEVAL_QUERY" } ], "parameters": { @@ -42,7 +48,10 @@ public void testToXContent_SingleEmbeddingRequest_WritesAutoTruncationIfDefined( } public void testToXContent_SingleEmbeddingRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), null); + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc"), + new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.INGEST) + ); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -52,15 +61,16 @@ public void testToXContent_SingleEmbeddingRequest_DoesNotWriteAutoTruncationIfNo { "instances": [ { - "content": "abc" + "content": "abc", + "task_type": "RETRIEVAL_DOCUMENT" } ] } """)); } - public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), true); + public void testToXContent_SingleEmbeddingRequest_DoesNotWriteInputTypeIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc"), new GoogleVertexAiEmbeddingsTaskSettings(false, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -71,9 +81,35 @@ public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefin "instances": [ { "content": "abc" + } + ], + "parameters": { + "autoTruncate": false + } + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_WritesAllFields() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc", "def"), + new GoogleVertexAiEmbeddingsTaskSettings(true, InputType.CLUSTERING) + ); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc", + "task_type": "CLUSTERING" }, { - "content": "def" + "content": "def", + "task_type": "CLUSTERING" } ], "parameters": { @@ -83,8 +119,8 @@ public void testToXContent_MultipleEmbeddingsRequest_WritesAutoTruncationIfDefin """)); } - public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { - var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), null); + public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteInputTypeIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), new GoogleVertexAiEmbeddingsTaskSettings(true, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); entity.toXContent(builder, null); @@ -99,8 +135,48 @@ public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationI { "content": "def" } + ], + "parameters": { + "autoTruncate": true + } + } + """)); + } + + public void testToXContent_MultipleEmbeddingsRequest_DoesNotWriteAutoTruncationIfNotDefined() throws IOException { + var entity = new GoogleVertexAiEmbeddingsRequestEntity( + List.of("abc", "def"), + new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.CLASSIFICATION) + ); + + XContentBuilder builder 
= XContentFactory.contentBuilder(XContentType.JSON); + entity.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, equalToIgnoringWhitespaceInJsonString(""" + { + "instances": [ + { + "content": "abc", + "task_type": "CLASSIFICATION" + }, + { + "content": "def", + "task_type": "CLASSIFICATION" + } ] } """)); } + + public void testToXContent_ThrowsIfInputIsNull() { + expectThrows( + NullPointerException.class, + () -> new GoogleVertexAiEmbeddingsRequestEntity(null, new GoogleVertexAiEmbeddingsTaskSettings(null, InputType.CLASSIFICATION)) + ); + } + + public void testToXContent_ThrowsIfTaskSettingsIsNull() { + expectThrows(NullPointerException.class, () -> new GoogleVertexAiEmbeddingsRequestEntity(List.of("abc", "def"), null)); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java index b28fd8d3a0cf9..a26d3496bed6b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/googlevertexai/GoogleVertexAiEmbeddingsRequestTests.java @@ -10,6 +10,7 @@ import org.apache.http.HttpHeaders; import org.apache.http.client.methods.HttpPost; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.common.Truncator; @@ -31,11 +32,11 @@ public class GoogleVertexAiEmbeddingsRequestTests extends ESTestCase { private static final String AUTH_HEADER_VALUE = "foo"; - public void testCreateRequest_WithoutDimensionsSet_And_WithoutAutoTruncateSet() throws IOException { + public void testCreateRequest_WithoutDimensionsSet_And_WithoutAutoTruncateSet_And_WithoutInputTypeSet() throws IOException { var model = "model"; var input = "input"; - var request = createRequest(model, input, null); + var request = createRequest(model, input, null, null); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -54,7 +55,7 @@ public void testCreateRequest_WithAutoTruncateSet() throws IOException { var input = "input"; var autoTruncate = true; - var request = createRequest(model, input, autoTruncate); + var request = createRequest(model, input, autoTruncate, null); var httpRequest = request.createHttpRequest(); assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); @@ -68,11 +69,29 @@ public void testCreateRequest_WithAutoTruncateSet() throws IOException { assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input")), "parameters", Map.of("autoTruncate", true)))); } + public void testCreateRequest_WithInputTypeSet() throws IOException { + var model = "model"; + var input = "input"; + + var request = createRequest(model, input, null, InputType.SEARCH); + var httpRequest = request.createHttpRequest(); + + assertThat(httpRequest.httpRequestBase(), instanceOf(HttpPost.class)); + var httpPost = (HttpPost) httpRequest.httpRequestBase(); + + assertThat(httpPost.getLastHeader(HttpHeaders.CONTENT_TYPE).getValue(), is(XContentType.JSON.mediaType())); + 
assertThat(httpPost.getLastHeader(HttpHeaders.AUTHORIZATION).getValue(), is(AUTH_HEADER_VALUE)); + + var requestMap = entityAsMap(httpPost.getEntity().getContent()); + assertThat(requestMap, aMapWithSize(1)); + assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "input", "task_type", "RETRIEVAL_QUERY"))))); + } + public void testTruncate_ReducesInputTextSizeByHalf() throws IOException { var model = "model"; var input = "abcd"; - var request = createRequest(model, input, null); + var request = createRequest(model, input, null, null); var truncatedRequest = request.truncate(); var httpRequest = truncatedRequest.createHttpRequest(); @@ -87,8 +106,13 @@ public void testTruncate_ReducesInputTextSizeByHalf() throws IOException { assertThat(requestMap, is(Map.of("instances", List.of(Map.of("content", "ab"))))); } - private static GoogleVertexAiEmbeddingsRequest createRequest(String modelId, String input, @Nullable Boolean autoTruncate) { - var embeddingsModel = GoogleVertexAiEmbeddingsModelTests.createModel(modelId, autoTruncate); + private static GoogleVertexAiEmbeddingsRequest createRequest( + String modelId, + String input, + @Nullable Boolean autoTruncate, + @Nullable InputType inputType + ) { + var embeddingsModel = GoogleVertexAiEmbeddingsModelTests.createModel(modelId, autoTruncate, inputType); return new GoogleVertexAiEmbeddingsWithoutAuthRequest( TruncatorTests.createTruncator(), diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java index 6f28301078853..906a825e49561 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/GoogleVertexAiServiceTests.java @@ -13,8 +13,10 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.InferenceServiceConfiguration; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; import org.elasticsearch.inference.TaskType; @@ -109,7 +111,7 @@ public void testParseRequestConfig_CreatesGoogleVertexAiEmbeddingsModel() throws projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(true, InputType.INGEST), getSecretSettingsMap(serviceAccountJson) ), modelListener @@ -154,7 +156,7 @@ public void testParseRequestConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenChun projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(true, InputType.INGEST), createRandomChunkingSettingsMap(), getSecretSettingsMap(serviceAccountJson) ), @@ -200,7 +202,7 @@ public void testParseRequestConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenChun projectId ) ), - new HashMap<>(Map.of()), + getTaskSettingsMap(false, InputType.SEARCH), getSecretSettingsMap(serviceAccountJson) ), modelListener @@ -281,7 +283,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInConfig() throws I "project" ) ), - getTaskSettingsMap(true), + getTaskSettingsMap(true, InputType.SEARCH), getSecretSettingsMap("{}") ); config.put("extra_key", "value"); 
@@ -308,7 +310,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInServiceSettingsMa ); serviceSettings.put("extra_key", "value"); - var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap(true), getSecretSettingsMap("{}")); + var config = getRequestConfigMap(serviceSettings, getTaskSettingsMap(true, InputType.CLUSTERING), getSecretSettingsMap("{}")); var failureListener = getModelListenerForException( ElasticsearchStatusException.class, @@ -362,7 +364,7 @@ public void testParseRequestConfig_ThrowsWhenAnExtraKeyExistsInSecretSettingsMap "project" ) ), - getTaskSettingsMap(true), + getTaskSettingsMap(true, null), secretSettings ); @@ -399,7 +401,7 @@ public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiEmbeddingsM true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.SEARCH), getSecretSettingsMap(serviceAccountJson) ); @@ -417,7 +419,7 @@ public void testParsePersistedConfigWithSecrets_CreatesGoogleVertexAiEmbeddingsM assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.SEARCH))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -447,7 +449,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAGoogleVertexAiEmbeddings true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), createRandomChunkingSettingsMap(), getSecretSettingsMap(serviceAccountJson) ); @@ -466,7 +468,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAGoogleVertexAiEmbeddings assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } @@ -497,7 +499,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChun true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), getSecretSettingsMap(serviceAccountJson) ); @@ -515,7 +517,7 @@ public void testParsePersistedConfigWithSecrets_CreatesAnEmbeddingsModelWhenChun assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), 
instanceOf(ChunkingSettings.class)); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } @@ -573,7 +575,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.INGEST), getSecretSettingsMap(serviceAccountJson) ); persistedConfig.config().put("extra_key", "value"); @@ -592,7 +594,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.INGEST))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -625,7 +627,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), secretSettingsMap ); @@ -643,7 +645,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -676,7 +678,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists var persistedConfig = getPersistedConfigMap( serviceSettingsMap, - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, InputType.CLUSTERING), getSecretSettingsMap(serviceAccountJson) ); @@ -694,7 +696,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.CLUSTERING))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -711,7 +713,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists """; try (var service = createGoogleVertexAiService()) { - var taskSettings = getTaskSettingsMap(autoTruncate); + var taskSettings = getTaskSettingsMap(autoTruncate, InputType.SEARCH); taskSettings.put("extra_key", "value"); var persistedConfig = getPersistedConfigMap( @@ -745,7 +747,7 @@ public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists assertThat(embeddingsModel.getServiceSettings().location(), 
is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, InputType.SEARCH))); assertThat(embeddingsModel.getSecretSettings().serviceAccountJson().toString(), is(serviceAccountJson)); } } @@ -770,7 +772,7 @@ public void testParsePersistedConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenCh true ) ), - getTaskSettingsMap(autoTruncate), + getTaskSettingsMap(autoTruncate, null), createRandomChunkingSettingsMap() ); @@ -783,7 +785,7 @@ public void testParsePersistedConfig_CreatesAGoogleVertexAiEmbeddingsModelWhenCh assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); } } @@ -808,7 +810,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting true ) ), - getTaskSettingsMap(autoTruncate) + getTaskSettingsMap(autoTruncate, null) ); var model = service.parsePersistedConfig("id", TaskType.TEXT_EMBEDDING, persistedConfig.config()); @@ -820,7 +822,7 @@ public void testParsePersistedConfig_CreatesAnEmbeddingsModelWhenChunkingSetting assertThat(embeddingsModel.getServiceSettings().location(), is(location)); assertThat(embeddingsModel.getServiceSettings().projectId(), is(projectId)); assertThat(embeddingsModel.getServiceSettings().dimensionsSetByUser(), is(Boolean.TRUE)); - assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(embeddingsModel.getTaskSettings(), is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); assertThat(embeddingsModel.getConfigurations().getChunkingSettings(), instanceOf(ChunkingSettings.class)); } } @@ -838,12 +840,44 @@ public void testGetConfiguration() throws Exception { { "task_type": "text_embedding", "configuration": { + "input_type": { + "default_value": null, + "depends_on": [], + "display": "dropdown", + "label": "Input Type", + "options": [ + { + "label": "classification", + "value": "classification" + }, + { + "label": "clustering", + "value": "clustering" + }, + { + "label": "ingest", + "value": "ingest" + }, + { + "label": "search", + "value": "search" + } + ], + "order": 1, + "required": false, + "sensitive": false, + "tooltip": "Specifies the type of input passed to the model.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "" + }, "auto_truncate": { "default_value": null, "depends_on": [], "display": "toggle", "label": "Auto Truncate", - "order": 1, + "order": 2, "required": false, "sensitive": false, "tooltip": "Specifies if the API truncates inputs longer than the maximum token length automatically.", @@ -1005,11 +1039,15 @@ private static ActionListener getModelListenerForException(Class excep }); } - private static Map getTaskSettingsMap(Boolean autoTruncate) { + private static Map 
getTaskSettingsMap(Boolean autoTruncate, @Nullable InputType inputType) { var taskSettings = new HashMap(); taskSettings.put(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate); + if (inputType != null) { + taskSettings.put(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, inputType.toString()); + } + return taskSettings; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java index 68d03d350d06e..7836c5c15cfb1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsModelTests.java @@ -10,14 +10,18 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.inference.SimilarityMeasure; import org.elasticsearch.inference.TaskType; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.inference.services.googlevertexai.GoogleVertexAiSecretSettings; +import org.hamcrest.MatcherAssert; import java.net.URI; import java.net.URISyntaxException; +import java.util.Map; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettingsTests.getTaskSettingsMap; import static org.hamcrest.Matchers.is; public class GoogleVertexAiEmbeddingsModelTests extends ESTestCase { @@ -45,6 +49,75 @@ public void testBuildUri() throws URISyntaxException { ); } + public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreEmpty_AndInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, Map.of(), InputType.UNSPECIFIED); + + MatcherAssert.assertThat(overriddenModel, is(model)); + } + + public void testOverrideWith_DoesNotOverrideAndModelRemainsEqual_WhenSettingsAreNull_AndInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, null, InputType.UNSPECIFIED); + + MatcherAssert.assertThat(overriddenModel, is(model)); + } + + public void testOverrideWith_SetsInputTypeToOverride_WhenFieldIsNullInModelTaskSettings_AndNullInRequestTaskSettings() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingStoredTaskSettings() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_SetsInputType_FromRequest_IfValid_OverridingRequestTaskSettings() { + var 
model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, InputType.CLUSTERING), InputType.SEARCH); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_OverridesInputType_WithRequestTaskSettingsSearch_WhenRequestInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, InputType.SEARCH), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.SEARCH); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotSetInputType_FromRequest_IfInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, null); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, null); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotSetInputType_WhenRequestTaskSettingsIsNull_AndRequestInputTypeIsInvalid() { + var model = createModel("model", Boolean.FALSE, InputType.INGEST); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, getTaskSettingsMap(null, null), InputType.UNSPECIFIED); + + var expectedModel = createModel("model", Boolean.FALSE, InputType.INGEST); + MatcherAssert.assertThat(overriddenModel, is(expectedModel)); + } + + public void testOverrideWith_DoesNotOverrideModelUri() { + var model = createModel("model", Boolean.FALSE, InputType.SEARCH); + var overriddenModel = GoogleVertexAiEmbeddingsModel.of(model, Map.of(), null); + + MatcherAssert.assertThat(overriddenModel.uri(), is(model.uri())); + } + public static GoogleVertexAiEmbeddingsModel createModel( String location, String projectId, @@ -58,12 +131,37 @@ public static GoogleVertexAiEmbeddingsModel createModel( "service", uri, new GoogleVertexAiEmbeddingsServiceSettings(location, projectId, modelId, false, null, null, null, null), - new GoogleVertexAiEmbeddingsTaskSettings(Boolean.FALSE), + new GoogleVertexAiEmbeddingsTaskSettings(Boolean.FALSE, null), new GoogleVertexAiSecretSettings(new SecureString(serviceAccountJson.toCharArray())) ); } - public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate) { + public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullable Boolean autoTruncate, @Nullable InputType inputType) { + return new GoogleVertexAiEmbeddingsModel( + "id", + TaskType.TEXT_EMBEDDING, + "service", + new GoogleVertexAiEmbeddingsServiceSettings( + "location", + "projectId", + modelId, + false, + null, + null, + SimilarityMeasure.DOT_PRODUCT, + null + ), + new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType), + null, + new GoogleVertexAiSecretSettings(new SecureString("testString".toCharArray())) + ); + } + + public static GoogleVertexAiEmbeddingsModel createRandomizedModel( + String modelId, + @Nullable Boolean autoTruncate, + @Nullable InputType inputType + ) { return new GoogleVertexAiEmbeddingsModel( "id", TaskType.TEXT_EMBEDDING, @@ -78,7 +176,7 @@ public static GoogleVertexAiEmbeddingsModel createModel(String modelId, @Nullabl SimilarityMeasure.DOT_PRODUCT, null ), - new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate), + new 
GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType), null, new GoogleVertexAiSecretSettings(new SecureString(randomAlphaOfLength(8).toCharArray())) ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java index 1e9a2f435cb08..a49e0f2e3f57d 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsRequestTaskSettingsTests.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; +import org.elasticsearch.common.ValidationException; +import org.elasticsearch.inference.InputType; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; @@ -21,9 +23,14 @@ public void testFromMap_ReturnsEmptySettings_IfMapEmpty() { assertThat(requestTaskSettings, is(GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS)); } + public void testFromMap_ReturnsEmptySettings_IfMapNull() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(null); + assertThat(requestTaskSettings, is(GoogleVertexAiEmbeddingsRequestTaskSettings.EMPTY_SETTINGS)); + } + public void testFromMap_DoesNotThrowValidationException_IfAutoTruncateIsMissing() { var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of("unrelated", true))); - assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null))); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null))); } public void testFromMap_ExtractsAutoTruncate() { @@ -31,6 +38,40 @@ public void testFromMap_ExtractsAutoTruncate() { var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, autoTruncate)) ); - assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate))); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(autoTruncate, null))); + } + + public void testFromMap_ThrowsValidationException_IfAutoTruncateIsInvalidValue() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, "invalid")) + ) + ); + } + + public void testFromMap_ExtractsInputType() { + var requestTaskSettings = GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.INGEST.toString())) + ); + assertThat(requestTaskSettings, is(new GoogleVertexAiEmbeddingsRequestTaskSettings(null, InputType.INGEST))); + } + + public void testFromMap_ThrowsValidationException_IfInputTypeIsInvalidValue() { + expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, "abc")) + ) + ); + } + + public void testFromMap_ThrowsValidationException_IfInputTypeIsUnspecified() { + expectThrows( + ValidationException.class, + () -> 
GoogleVertexAiEmbeddingsRequestTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.UNSPECIFIED.toString())) + ) + ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java index 5b87bbc3c42c8..0a390b114702c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googlevertexai/embeddings/GoogleVertexAiEmbeddingsTaskSettingsTests.java @@ -8,21 +8,30 @@ package org.elasticsearch.xpack.inference.services.googlevertexai.embeddings; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.inference.InputType; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; +import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; +import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.inference.InputTypeTests.randomWithoutUnspecified; import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE; +import static org.elasticsearch.xpack.inference.services.googlevertexai.embeddings.GoogleVertexAiEmbeddingsTaskSettings.VALID_REQUEST_VALUES; import static org.hamcrest.Matchers.is; public class GoogleVertexAiEmbeddingsTaskSettingsTests extends AbstractBWCWireSerializationTestCase { @@ -39,6 +48,9 @@ public void testUpdatedTaskSettings() { if (newSettings.autoTruncate() != null) { newSettingsMap.put(GoogleVertexAiEmbeddingsTaskSettings.AUTO_TRUNCATE, newSettings.autoTruncate()); } + if (newSettings.getInputType() != null) { + newSettingsMap.put(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, newSettings.getInputType().toString()); + } GoogleVertexAiEmbeddingsTaskSettings updatedSettings = (GoogleVertexAiEmbeddingsTaskSettings) initialSettings.updatedTaskSettings( Collections.unmodifiableMap(newSettingsMap) ); @@ -47,56 +59,144 @@ public void testUpdatedTaskSettings() { } else { assertEquals(newSettings.autoTruncate(), updatedSettings.autoTruncate()); } + if (newSettings.getInputType() == null) { + assertEquals(initialSettings.getInputType(), updatedSettings.getInputType()); + } else { + assertEquals(newSettings.getInputType(), updatedSettings.getInputType()); + } + } + + public void testFromMap_CreatesEmptySettings_WhenAllFieldsAreNull() { + MatcherAssert.assertThat( + GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()), + is(new GoogleVertexAiEmbeddingsTaskSettings(null, null)) + ); + 
assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).autoTruncate()); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).getInputType()); + } + + public void testFromMap_CreatesEmptySettings_WhenMapIsNull() { + MatcherAssert.assertThat( + GoogleVertexAiEmbeddingsTaskSettings.fromMap(null), + is(new GoogleVertexAiEmbeddingsTaskSettings(null, null)) + ); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(null).autoTruncate()); + assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(null).getInputType()); } public void testFromMap_AutoTruncateIsSet() { var autoTruncate = true; - var taskSettingsMap = getTaskSettingsMap(autoTruncate); + var taskSettingsMap = getTaskSettingsMap(autoTruncate, null); var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); - assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate))); + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, null))); } public void testFromMap_ThrowsValidationException_IfAutoTruncateIsInvalidValue() { - var taskSettings = getTaskSettingsMap("invalid"); + var taskSettings = getTaskSettingsMap("invalid", null); expectThrows(ValidationException.class, () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettings)); } public void testFromMap_AutoTruncateIsNull() { - var taskSettingsMap = getTaskSettingsMap(null); + var taskSettingsMap = getTaskSettingsMap(null, null); var taskSettings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(taskSettingsMap); // needed, because of constructors being ambiguous otherwise Boolean nullBoolean = null; - assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(nullBoolean))); + assertThat(taskSettings, is(new GoogleVertexAiEmbeddingsTaskSettings(nullBoolean, null))); } - public void testFromMap_DoesNotThrow_WithEmptyMap() { - assertNull(GoogleVertexAiEmbeddingsTaskSettings.fromMap(new HashMap<>()).autoTruncate()); + public void testFromMap_ReturnsFailure_WhenInputTypeIsInvalid() { + var exception = expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, "abc")) + ) + ); + + assertThat( + exception.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [task_settings] Invalid value [abc] received. [input_type] must be one of [%s];", + getValidValuesSortedAndCombined(VALID_REQUEST_VALUES) + ) + ) + ); + } + + public void testFromMap_ReturnsFailure_WhenInputTypeIsUnspecified() { + var exception = expectThrows( + ValidationException.class, + () -> GoogleVertexAiEmbeddingsTaskSettings.fromMap( + new HashMap<>(Map.of(GoogleVertexAiEmbeddingsTaskSettings.INPUT_TYPE, InputType.UNSPECIFIED.toString())) + ) + ); + + assertThat( + exception.getMessage(), + is( + Strings.format( + "Validation Failed: 1: [task_settings] Invalid value [unspecified] received. 
[input_type] must be one of [%s];", + getValidValuesSortedAndCombined(VALID_REQUEST_VALUES) + ) + ) + ); } public void testOf_UseRequestSettings() { var originalAutoTruncate = true; - var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, null); var requestAutoTruncate = originalAutoTruncate == false; - var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate); + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate, null); - assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(requestAutoTruncate)); + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(requestAutoTruncate) + ); + } + + public void testOf_UseRequestSettings_AndRequestInputType() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, InputType.SEARCH); + + var requestAutoTruncate = originalAutoTruncate == false; + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(requestAutoTruncate, null); + + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, InputType.INGEST).getInputType(), + is(InputType.INGEST) + ); } public void testOf_UseOriginalSettings() { var originalAutoTruncate = true; - var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate); + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, null); - var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null); + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null); - assertThat(GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings).autoTruncate(), is(originalAutoTruncate)); + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(originalAutoTruncate) + ); + } + + public void testOf_UseOriginalSettings_WithInputType() { + var originalAutoTruncate = true; + var originalSettings = new GoogleVertexAiEmbeddingsTaskSettings(originalAutoTruncate, InputType.INGEST); + + var requestTaskSettings = new GoogleVertexAiEmbeddingsRequestTaskSettings(null, null); + + assertThat( + GoogleVertexAiEmbeddingsTaskSettings.of(originalSettings, requestTaskSettings, null).autoTruncate(), + is(originalAutoTruncate) + ); } public void testToXContent_WritesAutoTruncateIfNotNull() throws IOException { - var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true)); + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); settings.toXContent(builder, null); @@ -107,7 +207,7 @@ public void testToXContent_WritesAutoTruncateIfNotNull() throws IOException { } public void testToXContent_DoesNotWriteAutoTruncateIfNull() throws IOException { - var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(null)); + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(null, null)); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); settings.toXContent(builder, null); @@ -117,6 +217,25 @@ public void testToXContent_DoesNotWriteAutoTruncateIfNull() throws 
IOException { {}""")); } + public void testToXContent_WritesInputTypeIfNotNull() throws IOException { + var settings = GoogleVertexAiEmbeddingsTaskSettings.fromMap(getTaskSettingsMap(true, InputType.INGEST)); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + settings.toXContent(builder, null); + String xContentResult = Strings.toString(builder); + + assertThat(xContentResult, is(""" + {"input_type":"ingest","auto_truncate":true}""")); + } + + public void testToXContent_ThrowsAssertionFailure_WhenInputTypeIsUnspecified() { + var thrownException = expectThrows( + AssertionError.class, + () -> new GoogleVertexAiEmbeddingsTaskSettings(false, InputType.UNSPECIFIED) + ); + assertThat(thrownException.getMessage(), is("received invalid input type value [unspecified]")); + } + @Override protected Writeable.Reader instanceReader() { return GoogleVertexAiEmbeddingsTaskSettings::new; @@ -137,20 +256,37 @@ protected GoogleVertexAiEmbeddingsTaskSettings mutateInstanceForVersion( GoogleVertexAiEmbeddingsTaskSettings instance, TransportVersion version ) { + if (version.before(TransportVersions.VERTEX_AI_INPUT_TYPE_ADDED)) { + // default to null input type if node is on a version before input type was introduced + return new GoogleVertexAiEmbeddingsTaskSettings(instance.autoTruncate(), null); + } return instance; } private static GoogleVertexAiEmbeddingsTaskSettings createRandom() { - return new GoogleVertexAiEmbeddingsTaskSettings(randomFrom(new Boolean[] { null, randomBoolean() })); + var inputType = randomBoolean() ? randomWithoutUnspecified() : null; + var autoTruncate = randomFrom(new Boolean[] { null, randomBoolean() }); + return new GoogleVertexAiEmbeddingsTaskSettings(autoTruncate, inputType); + } + + private static > String getValidValuesSortedAndCombined(EnumSet validValues) { + var validValuesAsStrings = validValues.stream().map(value -> value.toString().toLowerCase(Locale.ROOT)).toArray(String[]::new); + Arrays.sort(validValuesAsStrings); + + return String.join(", ", validValuesAsStrings); } - private static Map getTaskSettingsMap(@Nullable Object autoTruncate) { + public static Map getTaskSettingsMap(@Nullable Object autoTruncate, @Nullable InputType inputType) { var map = new HashMap(); if (autoTruncate != null) { map.put(AUTO_TRUNCATE, autoTruncate); } + if (inputType != null) { + map.put(INPUT_TYPE, inputType.toString()); + } + return map; } } From 778ab8fee362e5a17195f24d23dbbf6ee88557c4 Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:39:41 +0100 Subject: [PATCH 60/95] Re-structure document ID generation favoring `_id` inverted index compression (#104683) This implementation restructures auto-generated document IDs to maximize compression within Lucene's terms dictionary. The key insight is placing stable or slowly-changing components at the start of the ID - the most significant bytes of the timestamp change very gradually (the first byte shifts only every 35 years, the second every 50 days). This careful ordering means that large sequences of IDs generated close in time will share common prefixes, allowing Lucene's Finite State Transducer (FST) to store terms more compactly. To maintain uniqueness while preserving these compression benefits, the ID combines three elements: a timestamp that ensures time-based ordering, the coordinator's MAC address for cluster-wide uniqueness, and a sequence number for handling high-throughput scenarios. 
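To make the prefix-sharing concrete, here is a minimal sketch (illustrative
only, not part of this patch; the demo class name is made up, though
UUIDs.base64TimeBasedKOrderedUUID() is the helper the patch itself adds) that
generates two IDs back to back and counts how many leading characters they
share:

    import org.elasticsearch.common.UUIDs;

    public class KOrderedIdPrefixDemo {
        public static void main(String[] args) {
            // IDs generated close together in time differ only in the low
            // timestamp bits and the sequence number, which sit at the end.
            String a = UUIDs.base64TimeBasedKOrderedUUID();
            String b = UUIDs.base64TimeBasedKOrderedUUID();
            int shared = 0;
            while (shared < Math.min(a.length(), b.length()) && a.charAt(shared) == b.charAt(shared)) {
                shared++;
            }
            System.out.println(a);
            System.out.println(b);
            System.out.println("shared prefix length: " + shared);
        }
    }

Long runs of IDs that agree on a leading prefix are exactly what the terms
dictionary FST stores compactly.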
The timestamp handling is particularly robust, using atomic operations to prevent backwards movement even if the system clock shifts. For high-volume indices generating millions of documents, this optimization can lead to substantial storage savings while maintaining strict guarantees about ID uniqueness and ordering. --- docs/changelog/104683.yaml | 5 + .../action/bulk/BulkIntegrationIT.java | 6 +- .../action/index/IndexRequest.java | 29 +++- .../cluster/routing/IndexRouting.java | 12 +- .../TimeBasedKOrderedUUIDGenerator.java | 73 ++++++++++ .../common/TimeBasedUUIDGenerator.java | 4 +- .../java/org/elasticsearch/common/UUIDs.java | 10 ++ .../elasticsearch/index/IndexVersions.java | 1 + .../action/index/IndexRequestTests.java | 6 + .../org/elasticsearch/common/UUIDTests.java | 134 ++++++++++++------ 10 files changed, 225 insertions(+), 55 deletions(-) create mode 100644 docs/changelog/104683.yaml create mode 100644 server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java diff --git a/docs/changelog/104683.yaml b/docs/changelog/104683.yaml new file mode 100644 index 0000000000000..d4f40b59cfd91 --- /dev/null +++ b/docs/changelog/104683.yaml @@ -0,0 +1,5 @@ +pr: 104683 +summary: "Feature: re-structure document ID generation favoring _id inverted index compression" +area: Logs +type: enhancement +issues: [] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index 34170d7c0f747..e45555b1dec19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -99,7 +99,11 @@ public void testBulkWithWriteIndexAndRouting() { // allowing the auto-generated timestamp to externally be set would allow making the index inconsistent with duplicate docs public void testExternallySetAutoGeneratedTimestamp() { IndexRequest indexRequest = new IndexRequest("index1").source(Collections.singletonMap("foo", "baz")); - indexRequest.autoGenerateId(); + if (randomBoolean()) { + indexRequest.autoGenerateId(); + } else { + indexRequest.autoGenerateTimeBasedId(); + } if (randomBoolean()) { indexRequest.id("test"); } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index d0785a60dd0f5..c0811e7424b0d 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -51,6 +51,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -76,6 +77,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_10_X; + private static final Supplier ID_GENERATOR = UUIDs::base64UUID; + private static final Supplier K_SORTED_TIME_BASED_ID_GENERATOR = UUIDs::base64TimeBasedKOrderedUUID; + /** * Max length of the source document to include into string() * @@ -692,10 +696,18 @@ public void process(IndexRouting 
indexRouting) { * request compatible with the append-only optimization. */ public void autoGenerateId() { - assert id == null; - assert autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP : "timestamp has already been generated!"; - assert ifSeqNo == UNASSIGNED_SEQ_NO; - assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM; + assertBeforeGeneratingId(); + autoGenerateTimestamp(); + id(ID_GENERATOR.get()); + } + + public void autoGenerateTimeBasedId() { + assertBeforeGeneratingId(); + autoGenerateTimestamp(); + id(K_SORTED_TIME_BASED_ID_GENERATOR.get()); + } + + private void autoGenerateTimestamp() { /* * Set the auto generated timestamp so the append only optimization * can quickly test if this request *must* be unique without reaching @@ -704,8 +716,13 @@ public void autoGenerateId() { * never work before 1970, but that's ok. It's after 1970. */ autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); - String uid = UUIDs.base64UUID(); - id(uid); + } + + private void assertBeforeGeneratingId() { + assert id == null; + assert autoGeneratedTimestamp == UNSET_AUTO_GENERATED_TIMESTAMP : "timestamp has already been generated!"; + assert ifSeqNo == UNASSIGNED_SEQ_NO; + assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index 3fb3c182f89cd..1c89d3bf259b5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.transport.Transports; @@ -147,11 +149,15 @@ public void checkIndexSplitAllowed() {} private abstract static class IdAndRoutingOnly extends IndexRouting { private final boolean routingRequired; + private final IndexVersion creationVersion; + private final IndexMode indexMode; IdAndRoutingOnly(IndexMetadata metadata) { super(metadata); + this.creationVersion = metadata.getCreationVersion(); MappingMetadata mapping = metadata.mapping(); this.routingRequired = mapping == null ? false : mapping.routingRequired(); + this.indexMode = metadata.getIndexMode(); } protected abstract int shardId(String id, @Nullable String routing); @@ -161,7 +167,11 @@ public void process(IndexRequest indexRequest) { // generate id if not already provided final String id = indexRequest.id(); if (id == null) { - indexRequest.autoGenerateId(); + if (creationVersion.onOrAfter(IndexVersions.TIME_BASED_K_ORDERED_DOC_ID) && indexMode == IndexMode.LOGSDB) { + indexRequest.autoGenerateTimeBasedId(); + } else { + indexRequest.autoGenerateId(); + } } else if (id.isEmpty()) { throw new IllegalArgumentException("if _id is specified it must not be empty"); } diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java new file mode 100644 index 0000000000000..9c97cb8fe7e85 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedKOrderedUUIDGenerator.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.common; + +import java.nio.ByteBuffer; +import java.util.Base64; + +/** + * Generates a base64-encoded, k-ordered UUID string optimized for compression and efficient indexing. + *
+ * This method produces a time-based UUID where slowly changing components like the timestamp appear first, + * improving prefix-sharing and compression during indexing. It ensures uniqueness across nodes by incorporating + * a timestamp, a MAC address, and a sequence ID. + *
+ * Timestamp: Represents the current time in milliseconds, ensuring ordering and uniqueness. + *
+ * MAC Address: Ensures uniqueness across different coordinators. + *
+ * Sequence ID: Differentiates UUIDs generated within the same millisecond, ensuring uniqueness even at high throughput. + *
+ * The result is a compact base64-encoded string, optimized for efficient compression of the _id field in an inverted index. + */ +public class TimeBasedKOrderedUUIDGenerator extends TimeBasedUUIDGenerator { + private static final Base64.Encoder BASE_64_NO_PADDING = Base64.getEncoder().withoutPadding(); + + @Override + public String getBase64UUID() { + final int sequenceId = this.sequenceNumber.incrementAndGet() & 0x00FF_FFFF; + + // Calculate timestamp to ensure ordering and avoid backward movement in case of time shifts. + // Uses AtomicLong to guarantee that timestamp increases even if the system clock moves backward. + // If the sequenceId overflows (reaches 0 within the same millisecond), the timestamp is incremented + // to ensure strict ordering. + long timestamp = this.lastTimestamp.accumulateAndGet( + currentTimeMillis(), + sequenceId == 0 ? (lastTimestamp, currentTimeMillis) -> Math.max(lastTimestamp, currentTimeMillis) + 1 : Math::max + ); + + final byte[] uuidBytes = new byte[15]; + final ByteBuffer buffer = ByteBuffer.wrap(uuidBytes); + + buffer.put((byte) (timestamp >>> 40)); // changes every 35 years + buffer.put((byte) (timestamp >>> 32)); // changes every ~50 days + buffer.put((byte) (timestamp >>> 24)); // changes every ~4.5h + buffer.put((byte) (timestamp >>> 16)); // changes every ~65 secs + + // MAC address of the coordinator might change if there are many coordinators in the cluster + // and the indexing api does not necessarily target the same coordinator. + byte[] macAddress = macAddress(); + assert macAddress.length == 6; + buffer.put(macAddress, 0, macAddress.length); + + buffer.put((byte) (sequenceId >>> 16)); + + // From hereinafter everything is almost like random and does not compress well + // due to unlikely prefix-sharing + buffer.put((byte) (timestamp >>> 8)); + buffer.put((byte) (sequenceId >>> 8)); + buffer.put((byte) timestamp); + buffer.put((byte) sequenceId); + + assert buffer.position() == uuidBytes.length; + + return BASE_64_NO_PADDING.encodeToString(uuidBytes); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index 73528ed0d3866..2ed979ae66ffa 100644 --- a/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/server/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -24,10 +24,10 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { // We only use bottom 3 bytes for the sequence number. 
Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - private final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); + protected final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); // Used to ensure clock moves forward: - private final AtomicLong lastTimestamp = new AtomicLong(0); + protected final AtomicLong lastTimestamp = new AtomicLong(0); private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); diff --git a/server/src/main/java/org/elasticsearch/common/UUIDs.java b/server/src/main/java/org/elasticsearch/common/UUIDs.java index 61ee4bd5d64ab..0f73b8172c10f 100644 --- a/server/src/main/java/org/elasticsearch/common/UUIDs.java +++ b/server/src/main/java/org/elasticsearch/common/UUIDs.java @@ -16,6 +16,8 @@ public class UUIDs { private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); + + private static final UUIDGenerator TIME_BASED_K_ORDERED_GENERATOR = new TimeBasedKOrderedUUIDGenerator(); private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); /** @@ -33,6 +35,14 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } + public static String base64TimeBasedKOrderedUUID() { + return TIME_BASED_K_ORDERED_GENERATOR.getBase64UUID(); + } + + public static String base64TimeBasedUUID() { + return TIME_UUID_GENERATOR.getBase64UUID(); + } + /** * The length of a UUID string generated by {@link #randomBase64UUID} and {@link #randomBase64UUIDSecureString}. */ diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 440613263d441..9264b9e1c3a20 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -132,6 +132,7 @@ private static Version parseUnchecked(String version) { public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0); + public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index 32297e0c09b8f..9d74c2069ec10 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -128,6 +128,12 @@ public void testAutoGenerateId() { assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0); } + public void testAutoGenerateTimeBasedId() { + IndexRequest request = new IndexRequest("index"); + request.autoGenerateTimeBasedId(); + assertTrue("expected > 0 but got: " + request.getAutoGeneratedTimestamp(), request.getAutoGeneratedTimestamp() > 0); + } + public void testIndexResponse() { ShardId shardId = new ShardId(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10), randomIntBetween(0, 1000)); String id = randomAlphaOfLengthBetween(3, 10); diff --git a/server/src/test/java/org/elasticsearch/common/UUIDTests.java b/server/src/test/java/org/elasticsearch/common/UUIDTests.java index 2e7dbb00aa2de..9fbeaf1c6c081 100644 --- a/server/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/server/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -35,6 +35,7 @@ public class UUIDTests extends ESTestCase { static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator(); static UUIDGenerator randomUUIDGen = new RandomBasedUUIDGenerator(); + static UUIDGenerator kOrderedUUIDGen = new TimeBasedKOrderedUUIDGenerator(); public void testRandomUUID() { verifyUUIDSet(100000, randomUUIDGen); @@ -44,14 +45,49 @@ public void testTimeUUID() { verifyUUIDSet(100000, timeUUIDGen); } - public void testThreadedTimeUUID() { - testUUIDThreaded(timeUUIDGen); + public void testKOrderedUUID() { + verifyUUIDSet(100000, kOrderedUUIDGen); } public void testThreadedRandomUUID() { testUUIDThreaded(randomUUIDGen); } + public void testThreadedTimeUUID() { + testUUIDThreaded(timeUUIDGen); + } + + public void testThreadedKOrderedUUID() { + testUUIDThreaded(kOrderedUUIDGen); + } + + public void testCompression() throws Exception { + Logger logger = LogManager.getLogger(UUIDTests.class); + + assertThat(testCompression(timeUUIDGen, 100000, 10000, 3, logger), Matchers.lessThan(14d)); + assertThat(testCompression(timeUUIDGen, 100000, 1000, 3, logger), Matchers.lessThan(15d)); + assertThat(testCompression(timeUUIDGen, 100000, 100, 3, logger), Matchers.lessThan(21d)); + + assertThat(testCompression(kOrderedUUIDGen, 100000, 10000, 3, logger), Matchers.lessThan(13d)); + assertThat(testCompression(kOrderedUUIDGen, 100000, 1000, 3, logger), Matchers.lessThan(14d)); + assertThat(testCompression(kOrderedUUIDGen, 100000, 100, 3, logger), Matchers.lessThan(19d)); + } + + public void testComparativeCompression() throws Exception { + Logger logger = LogManager.getLogger(UUIDTests.class); + + int numDocs = 100000; + int docsPerSecond = 1000; + int nodes = 3; + + double randomCompression = testCompression(randomUUIDGen, numDocs, docsPerSecond, nodes, logger); + double baseCompression = testCompression(timeUUIDGen, numDocs, docsPerSecond, nodes, logger); + double kOrderedCompression = testCompression(kOrderedUUIDGen, numDocs, docsPerSecond, nodes, logger); + + assertThat(kOrderedCompression, Matchers.lessThanOrEqualTo(baseCompression)); + assertThat(kOrderedCompression, 
Matchers.lessThanOrEqualTo(randomCompression)); + } + Set verifyUUIDSet(int count, UUIDGenerator uuidSource) { HashSet uuidSet = new HashSet<>(); for (int i = 0; i < count; ++i) { @@ -109,49 +145,62 @@ public void testUUIDThreaded(UUIDGenerator uuidSource) { assertEquals(count * uuids, globalSet.size()); } - public void testCompression() throws Exception { - Logger logger = LogManager.getLogger(UUIDTests.class); - // Low number so that the test runs quickly, but the results are more interesting with larger numbers - // of indexed documents - assertThat(testCompression(100000, 10000, 3, logger), Matchers.lessThan(14d)); // ~12 in practice - assertThat(testCompression(100000, 1000, 3, logger), Matchers.lessThan(15d)); // ~13 in practice - assertThat(testCompression(100000, 100, 3, logger), Matchers.lessThan(21d)); // ~20 in practice - } - - private static double testCompression(int numDocs, int numDocsPerSecond, int numNodes, Logger logger) throws Exception { - final double intervalBetweenDocs = 1000. / numDocsPerSecond; // milliseconds + private static double testCompression(final UUIDGenerator generator, int numDocs, int numDocsPerSecond, int numNodes, Logger logger) + throws Exception { + final double intervalBetweenDocs = 1000. / numDocsPerSecond; final byte[][] macAddresses = new byte[numNodes][]; Random r = random(); for (int i = 0; i < macAddresses.length; ++i) { macAddresses[i] = new byte[6]; random().nextBytes(macAddresses[i]); } - UUIDGenerator generator = new TimeBasedUUIDGenerator() { - double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); - @Override - protected long currentTimeMillis() { - currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); - return (long) currentTimeMillis; + UUIDGenerator uuidSource = generator; + if (generator instanceof TimeBasedUUIDGenerator) { + if (generator instanceof TimeBasedKOrderedUUIDGenerator) { + uuidSource = new TimeBasedKOrderedUUIDGenerator() { + double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); + + @Override + protected long currentTimeMillis() { + currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); + return (long) currentTimeMillis; + } + + @Override + protected byte[] macAddress() { + return RandomPicks.randomFrom(r, macAddresses); + } + }; + } else { + uuidSource = new TimeBasedUUIDGenerator() { + double currentTimeMillis = TestUtil.nextLong(random(), 0L, 10000000000L); + + @Override + protected long currentTimeMillis() { + currentTimeMillis += intervalBetweenDocs * 2 * r.nextDouble(); + return (long) currentTimeMillis; + } + + @Override + protected byte[] macAddress() { + return RandomPicks.randomFrom(r, macAddresses); + } + }; } + } - @Override - protected byte[] macAddress() { - return RandomPicks.randomFrom(r, macAddresses); - } - }; - // Avoid randomization which will slow down things without improving - // the quality of this test Directory dir = newFSDirectory(createTempDir()); IndexWriterConfig config = new IndexWriterConfig().setCodec(Codec.forName(Lucene.LATEST_CODEC)) - .setMergeScheduler(new SerialMergeScheduler()); // for reproducibility + .setMergeScheduler(new SerialMergeScheduler()); + IndexWriter w = new IndexWriter(dir, config); Document doc = new Document(); StringField id = new StringField("_id", "", Store.NO); doc.add(id); long start = System.nanoTime(); for (int i = 0; i < numDocs; ++i) { - id.setStringValue(generator.getBase64UUID()); + id.setStringValue(uuidSource.getBase64UUID()); w.addDocument(doc); } w.forceMerge(1); @@ -164,30 +213,25 @@ protected 
byte[] macAddress() { dir.close(); double bytesPerDoc = (double) size / numDocs; logger.info( - numDocs - + " docs indexed at " - + numDocsPerSecond - + " docs/s required " - + ByteSizeValue.ofBytes(size) - + " bytes of disk space, or " - + bytesPerDoc - + " bytes per document. Took: " - + new TimeValue(time) - + "." + "{} - {} docs indexed at {} docs/s required {} bytes of disk space, or {} bytes per document. Took: {}.", + uuidSource.getClass().getSimpleName(), + numDocs, + numDocsPerSecond, + ByteSizeValue.ofBytes(size), + bytesPerDoc, + new TimeValue(time) ); return bytesPerDoc; } public void testStringLength() { assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(RandomBasedUUIDGenerator.SIZE_IN_BYTES)); - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, UUIDs.randomBase64UUID().length()); - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, UUIDs.randomBase64UUID(random()).length()); - try (var secureString = UUIDs.randomBase64UUIDSecureString()) { - assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, secureString.toString().length()); - } - assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(TimeBasedUUIDGenerator.SIZE_IN_BYTES)); - assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, UUIDs.base64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, getUnpaddedBase64StringLength(TimeBasedKOrderedUUIDGenerator.SIZE_IN_BYTES)); + + assertEquals(UUIDs.RANDOM_BASED_UUID_STRING_LENGTH, randomUUIDGen.getBase64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, timeUUIDGen.getBase64UUID().length()); + assertEquals(UUIDs.TIME_BASED_UUID_STRING_LENGTH, kOrderedUUIDGen.getBase64UUID().length()); } private static int getUnpaddedBase64StringLength(int sizeInBytes) { From d2e5c43c9baadaf9d8af84727323af6a3a33079f Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Tue, 12 Nov 2024 17:13:27 +0100 Subject: [PATCH 61/95] Export current node allocation stats as APM metrics (#116585) At the end of each reconciliation round, also export the current allocation stats for each node. This is intended to show the gradual progress (or divergence!) towards the desired values exported in https://github.com/elastic/elasticsearch/pull/115854, and relies on the existing `AllocationStatsService`. 
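[Editorial aside.] The headline per-node stat exported here is the "undesired shard count": a shard copy is undesired when the desired balance does not assign its shard to the node the copy currently sits on (relocating copies are skipped). A self-contained toy of that computation — plain Java, not Elasticsearch code, mirroring isDesiredAllocation() in the NodeAllocationStatsProvider introduced below:

import java.util.List;
import java.util.Map;
import java.util.Set;

public class UndesiredShardsDemo {
    record Shard(String shardId, String currentNodeId) {}

    static long undesiredShards(List<Shard> shardsOnNode, Map<String, Set<String>> desiredNodesByShard) {
        return shardsOnNode.stream().filter(s -> {
            Set<String> desired = desiredNodesByShard.get(s.shardId());
            // A shard with no desired assignment at all counts as undesired, matching the
            // assignment == null branch of isDesiredAllocation() in the real provider.
            return desired == null || desired.contains(s.currentNodeId()) == false;
        }).count();
    }

    public static void main(String[] args) {
        var shards = List.of(new Shard("idx[0]", "node-a"), new Shard("idx[1]", "node-a"));
        var desired = Map.of("idx[0]", Set.of("node-a"), "idx[1]", Set.of("node-b"));
        System.out.println(undesiredShards(shards, desired)); // prints 1: idx[1] wants node-b
    }
}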
Relates ES-9873 --- .../DesiredBalanceReconcilerMetricsIT.java | 82 +++++++++++-- .../elasticsearch/cluster/ClusterModule.java | 18 ++- .../allocation/AllocationStatsService.java | 69 ++--------- .../NodeAllocationStatsProvider.java | 82 +++++++++++++ .../allocator/DesiredBalanceMetrics.java | 111 +++++++++++++++++- .../allocator/DesiredBalanceReconciler.java | 29 ++++- .../DesiredBalanceShardsAllocator.java | 13 +- ...nsportDeleteDesiredBalanceActionTests.java | 3 +- .../AllocationStatsServiceTests.java | 14 ++- .../ClusterAllocationSimulationTests.java | 3 +- .../allocator/DesiredBalanceMetricsTests.java | 6 +- .../DesiredBalanceReconcilerTests.java | 16 ++- .../DesiredBalanceShardsAllocatorTests.java | 24 ++-- .../cluster/ESAllocationTestCase.java | 19 ++- 14 files changed, 380 insertions(+), 109 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java index bfe46dc4c90f2..36374f7a3a8eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -53,7 +53,7 @@ public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() thro } } - public void testDesiredBalanceNodeWeightMetrics() { + public void testDesiredBalanceMetrics() { internalCluster().startNodes(2); prepareCreate("test").setSettings(indexSettings(2, 1)).get(); indexRandom(randomBoolean(), "test", between(50, 100)); @@ -68,38 +68,83 @@ public void testDesiredBalanceNodeWeightMetrics() { var nodeIds = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); var nodeNames = internalCluster().clusterService().state().nodes().stream().map(DiscoveryNode::getName).collect(Collectors.toSet()); - final var nodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeWeightsMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME ); - assertThat(nodeWeightsMetrics.size(), equalTo(2)); - for (var nodeStat : nodeWeightsMetrics) { + assertThat(desiredBalanceNodeWeightsMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeWeightsMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + final var desiredBalanceNodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME ); - assertThat(nodeShardCountMetrics.size(), equalTo(2)); - for (var nodeStat : nodeShardCountMetrics) { + assertThat(desiredBalanceNodeShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeShardCountMetrics) { assertThat(nodeStat.value().longValue(), equalTo(2L)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) 
nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_WRITE_LOAD_METRIC_NAME ); - assertThat(nodeWriteLoadMetrics.size(), equalTo(2)); - for (var nodeStat : nodeWriteLoadMetrics) { + assertThat(desiredBalanceNodeWriteLoadMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeWriteLoadMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var nodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var desiredBalanceNodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( DesiredBalanceMetrics.DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME ); - assertThat(nodeDiskUsageMetrics.size(), equalTo(2)); - for (var nodeStat : nodeDiskUsageMetrics) { + assertThat(desiredBalanceNodeDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : desiredBalanceNodeDiskUsageMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME + ); + assertThat(currentNodeShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeShardCountMetrics) { + assertThat(nodeStat.value().longValue(), equalTo(2L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeWriteLoadMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME + ); + assertThat(currentNodeWriteLoadMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeWriteLoadMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME + ); + assertThat(currentNodeDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeDiskUsageMetrics) { + assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeUndesiredShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME + ); + assertThat(currentNodeUndesiredShardCountMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeUndesiredShardCountMetrics) { + assertThat(nodeStat.value().longValue(), greaterThanOrEqualTo(0L)); + assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); + assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); + } + final var currentNodeForecastedDiskUsageMetrics = 
telemetryPlugin.getDoubleGaugeMeasurement( + DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME + ); + assertThat(currentNodeForecastedDiskUsageMetrics.size(), equalTo(2)); + for (var nodeStat : currentNodeForecastedDiskUsageMetrics) { assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); @@ -136,6 +181,17 @@ private static void assertMetricsAreBeingPublished(String nodeName, boolean shou testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME), matcher ); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME), matcher); + assertThat( + testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME), + matcher + ); + assertThat( + testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME), + matcher + ); } private static TestTelemetryPlugin getTelemetryPlugin(String nodeName) { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 0383bbb9bd401..046f4b6b0b251 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationStatsService; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; @@ -138,6 +139,7 @@ public ClusterModule( this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); this.allocationDeciders = new AllocationDeciders(deciderList); + var nodeAllocationStatsProvider = new NodeAllocationStatsProvider(writeLoadForecaster); this.shardsAllocator = createShardsAllocator( settings, clusterService.getClusterSettings(), @@ -146,7 +148,8 @@ public ClusterModule( clusterService, this::reconcile, writeLoadForecaster, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices); @@ -160,7 +163,12 @@ public ClusterModule( ); this.allocationService.addAllocFailuresResetListenerTo(clusterService); this.metadataDeleteIndexService = new MetadataDeleteIndexService(settings, clusterService, allocationService); - this.allocationStatsService = new AllocationStatsService(clusterService, clusterInfoService, shardsAllocator, writeLoadForecaster); + this.allocationStatsService = new 
AllocationStatsService( + clusterService, + clusterInfoService, + shardsAllocator, + nodeAllocationStatsProvider + ); this.telemetryProvider = telemetryProvider; } @@ -400,7 +408,8 @@ private static ShardsAllocator createShardsAllocator( ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, WriteLoadForecaster writeLoadForecaster, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { Map> allocators = new HashMap<>(); allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); @@ -412,7 +421,8 @@ private static ShardsAllocator createShardsAllocator( threadPool, clusterService, reconciler, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ) ); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java index 3651f560e6dde..0c82faaaeaa45 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsService.java @@ -10,86 +10,35 @@ package org.elasticsearch.cluster.routing.allocation; import org.elasticsearch.cluster.ClusterInfoService; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.util.Maps; import java.util.Map; +import java.util.function.Supplier; public class AllocationStatsService { - private final ClusterService clusterService; private final ClusterInfoService clusterInfoService; - private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; - private final WriteLoadForecaster writeLoadForecaster; + private final Supplier desiredBalanceSupplier; + private final NodeAllocationStatsProvider nodeAllocationStatsProvider; public AllocationStatsService( ClusterService clusterService, ClusterInfoService clusterInfoService, ShardsAllocator shardsAllocator, - WriteLoadForecaster writeLoadForecaster + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this.clusterService = clusterService; this.clusterInfoService = clusterInfoService; - this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator ? allocator : null; - this.writeLoadForecaster = writeLoadForecaster; + this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; + this.desiredBalanceSupplier = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator + ? allocator::getDesiredBalance + : () -> null; } public Map stats() { - var state = clusterService.state(); - var info = clusterInfoService.getClusterInfo(); - var desiredBalance = desiredBalanceShardsAllocator != null ? 
desiredBalanceShardsAllocator.getDesiredBalance() : null; - - var stats = Maps.newMapWithExpectedSize(state.getRoutingNodes().size()); - for (RoutingNode node : state.getRoutingNodes()) { - int shards = 0; - int undesiredShards = 0; - double forecastedWriteLoad = 0.0; - long forecastedDiskUsage = 0; - long currentDiskUsage = 0; - for (ShardRouting shardRouting : node) { - if (shardRouting.relocating()) { - continue; - } - shards++; - IndexMetadata indexMetadata = state.metadata().getIndexSafe(shardRouting.index()); - if (isDesiredAllocation(desiredBalance, shardRouting) == false) { - undesiredShards++; - } - long shardSize = info.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); - forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); - forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); - currentDiskUsage += shardSize; - - } - stats.put( - node.nodeId(), - new NodeAllocationStats( - shards, - desiredBalanceShardsAllocator != null ? undesiredShards : -1, - forecastedWriteLoad, - forecastedDiskUsage, - currentDiskUsage - ) - ); - } - - return stats; - } - - private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { - if (desiredBalance == null) { - return true; - } - var assignment = desiredBalance.getAssignment(shardRouting.shardId()); - if (assignment == null) { - return false; - } - return assignment.nodeIds().contains(shardRouting.currentNodeId()); + return nodeAllocationStatsProvider.stats(clusterService.state(), clusterInfoService.getClusterInfo(), desiredBalanceSupplier.get()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java new file mode 100644 index 0000000000000..157b409be14d3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationStatsProvider.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Nullable; + +import java.util.Map; + +public class NodeAllocationStatsProvider { + private final WriteLoadForecaster writeLoadForecaster; + + public NodeAllocationStatsProvider(WriteLoadForecaster writeLoadForecaster) { + this.writeLoadForecaster = writeLoadForecaster; + } + + public Map stats( + ClusterState clusterState, + ClusterInfo clusterInfo, + @Nullable DesiredBalance desiredBalance + ) { + var stats = Maps.newMapWithExpectedSize(clusterState.getRoutingNodes().size()); + for (RoutingNode node : clusterState.getRoutingNodes()) { + int shards = 0; + int undesiredShards = 0; + double forecastedWriteLoad = 0.0; + long forecastedDiskUsage = 0; + long currentDiskUsage = 0; + for (ShardRouting shardRouting : node) { + if (shardRouting.relocating()) { + continue; + } + shards++; + IndexMetadata indexMetadata = clusterState.metadata().getIndexSafe(shardRouting.index()); + if (isDesiredAllocation(desiredBalance, shardRouting) == false) { + undesiredShards++; + } + long shardSize = clusterInfo.getShardSize(shardRouting.shardId(), shardRouting.primary(), 0); + forecastedWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + forecastedDiskUsage += Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0), shardSize); + currentDiskUsage += shardSize; + + } + stats.put( + node.nodeId(), + new NodeAllocationStats( + shards, + desiredBalance != null ? 
undesiredShards : -1, + forecastedWriteLoad, + forecastedDiskUsage, + currentDiskUsage + ) + ); + } + + return stats; + } + + private static boolean isDesiredAllocation(DesiredBalance desiredBalance, ShardRouting shardRouting) { + if (desiredBalance == null) { + return true; + } + var assignment = desiredBalance.getAssignment(shardRouting.shardId()); + if (assignment == null) { + return false; + } + return assignment.nodeIds().contains(shardRouting.currentNodeId()); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java index d8a2d01f56dff..3ed5bc269e6c4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -10,6 +10,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.MeterRegistry; @@ -26,10 +27,12 @@ public record AllocationStats(long unassignedShards, long totalAllocations, long public record NodeWeightStats(long shardCount, double diskUsageInBytes, double writeLoad, double nodeWeight) {} public static final DesiredBalanceMetrics NOOP = new DesiredBalanceMetrics(MeterRegistry.NOOP); + public static final String UNASSIGNED_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.unassigned.current"; public static final String TOTAL_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.current"; public static final String UNDESIRED_ALLOCATION_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.current"; public static final String UNDESIRED_ALLOCATION_RATIO_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.ratio"; + public static final String DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_weight.current"; public static final String DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_shard_count.current"; @@ -37,6 +40,15 @@ public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w "es.allocator.desired_balance.allocations.node_write_load.current"; public static final String DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.desired_balance.allocations.node_disk_usage_bytes.current"; + + public static final String CURRENT_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.allocations.node.shard_count.current"; + public static final String CURRENT_NODE_WRITE_LOAD_METRIC_NAME = "es.allocator.allocations.node.write_load.current"; + public static final String CURRENT_NODE_DISK_USAGE_METRIC_NAME = "es.allocator.allocations.node.disk_usage_bytes.current"; + public static final String CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME = + "es.allocator.allocations.node.undesired_shard_count.current"; + public static final String CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME = + "es.allocator.allocations.node.forecasted_disk_usage_bytes.current"; + public static final AllocationStats EMPTY_ALLOCATION_STATS = new AllocationStats(-1, -1, -1); private volatile boolean nodeIsMaster = false; @@ -56,8 +68,13 @@ 
public record NodeWeightStats(long shardCount, double diskUsageInBytes, double w private volatile long undesiredAllocations; private final AtomicReference> weightStatsPerNodeRef = new AtomicReference<>(Map.of()); + private final AtomicReference> allocationStatsPerNodeRef = new AtomicReference<>(Map.of()); - public void updateMetrics(AllocationStats allocationStats, Map weightStatsPerNode) { + public void updateMetrics( + AllocationStats allocationStats, + Map weightStatsPerNode, + Map nodeAllocationStats + ) { assert allocationStats != null : "allocation stats cannot be null"; assert weightStatsPerNode != null : "node balance weight stats cannot be null"; if (allocationStats != EMPTY_ALLOCATION_STATS) { @@ -66,6 +83,7 @@ public void updateMetrics(AllocationStats allocationStats, Map getDesiredBalanceNodeShardCountMetrics() { return values; } + private List getCurrentNodeDiskUsageMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).currentDiskUsage(), getNodeAttributes(node))); + } + return doubles; + } + + private List getCurrentNodeWriteLoadMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).forecastedIngestLoad(), getNodeAttributes(node))); + } + return doubles; + } + + private List getCurrentNodeShardCountMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).shards(), getNodeAttributes(node))); + } + return values; + } + + private List getCurrentNodeForecastedDiskUsageMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List doubles = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + doubles.add(new DoubleWithAttributes(stats.get(node).forecastedDiskUsage(), getNodeAttributes(node))); + } + return doubles; + } + + private List getCurrentNodeUndesiredShardCountMetrics() { + if (nodeIsMaster == false) { + return List.of(); + } + var stats = allocationStatsPerNodeRef.get(); + List values = new ArrayList<>(stats.size()); + for (var node : stats.keySet()) { + values.add(new LongWithAttributes(stats.get(node).undesiredShards(), getNodeAttributes(node))); + } + return values; + } + private Map getNodeAttributes(DiscoveryNode node) { return Map.of("node_id", node.getId(), "node_name", node.getName()); } @@ -216,5 +324,6 @@ public void zeroAllMetrics() { totalAllocations = 0; undesiredAllocations = 0; weightStatsPerNodeRef.set(Map.of()); + allocationStatsPerNodeRef.set(Map.of()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 129144a3d734b..5ad29debc8f20 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -20,6 +20,8 @@ import 
org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -34,7 +36,9 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.Comparator; +import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.Collectors; @@ -71,8 +75,14 @@ public class DesiredBalanceReconciler { private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering(); private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering(); private final DesiredBalanceMetrics desiredBalanceMetrics; - - public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool, DesiredBalanceMetrics desiredBalanceMetrics) { + private final NodeAllocationStatsProvider nodeAllocationStatsProvider; + + public DesiredBalanceReconciler( + ClusterSettings clusterSettings, + ThreadPool threadPool, + DesiredBalanceMetrics desiredBalanceMetrics, + NodeAllocationStatsProvider nodeAllocationStatsProvider + ) { this.desiredBalanceMetrics = desiredBalanceMetrics; this.undesiredAllocationLogInterval = new FrequencyCappedAction( threadPool.relativeTimeInMillisSupplier(), @@ -83,6 +93,7 @@ public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool thre UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING, value -> this.undesiredAllocationsLogThreshold = value ); + this.nodeAllocationStatsProvider = nodeAllocationStatsProvider; } public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) { @@ -143,8 +154,20 @@ void run() { logger.debug("Reconciliation is complete"); - desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode()); + updateDesireBalanceMetrics(allocationStats); + } + } + + private void updateDesireBalanceMetrics(AllocationStats allocationStats) { + var stats = nodeAllocationStatsProvider.stats(allocation.getClusterState(), allocation.clusterInfo(), desiredBalance); + Map nodeAllocationStats = new HashMap<>(stats.size()); + for (var entry : stats.entrySet()) { + var node = allocation.nodes().get(entry.getKey()); + if (node != null) { + nodeAllocationStats.put(node, entry.getValue()); + } } + desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), nodeAllocationStats); } private boolean allocateUnassignedInvariant() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 5ccb59e29d7dc..5597eb47e765b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.ShardRouting; import 
org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; @@ -85,7 +86,8 @@ public DesiredBalanceShardsAllocator( ThreadPool threadPool, ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this( delegateAllocator, @@ -93,7 +95,8 @@ public DesiredBalanceShardsAllocator( clusterService, new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator), reconciler, - telemetryProvider + telemetryProvider, + nodeAllocationStatsProvider ); } @@ -103,7 +106,8 @@ public DesiredBalanceShardsAllocator( ClusterService clusterService, DesiredBalanceComputer desiredBalanceComputer, DesiredBalanceReconcilerAction reconciler, - TelemetryProvider telemetryProvider + TelemetryProvider telemetryProvider, + NodeAllocationStatsProvider nodeAllocationStatsProvider ) { this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry()); this.delegateAllocator = delegateAllocator; @@ -113,7 +117,8 @@ public DesiredBalanceShardsAllocator( this.desiredBalanceReconciler = new DesiredBalanceReconciler( clusterService.getClusterSettings(), threadPool, - desiredBalanceMetrics + desiredBalanceMetrics, + nodeAllocationStatsProvider ); this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index bb4aa9beeb42e..8ea8b24baf6d5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -122,7 +122,8 @@ public DesiredBalance compute( clusterService, computer, (state, action) -> state, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = new MockAllocationService( randomAllocationDeciders(settings, clusterSettings), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java index 69e6983e16381..0efa576a0cddc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationStatsServiceTests.java @@ -80,7 +80,12 @@ public void testShardStats() { var queue = new DeterministicTaskQueue(); try (var clusterService = ClusterServiceUtils.createClusterService(state, queue.getThreadPool())) { - var service = new AllocationStatsService(clusterService, () -> clusterInfo, createShardAllocator(), TEST_WRITE_LOAD_FORECASTER); + var service = new AllocationStatsService( + clusterService, + () -> clusterInfo, + createShardAllocator(), + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) + ); assertThat( 
service.stats(), allOf( @@ -120,7 +125,7 @@ public void testRelocatingShardIsOnlyCountedOnceOnTargetNode() { clusterService, EmptyClusterInfoService.INSTANCE, createShardAllocator(), - TEST_WRITE_LOAD_FORECASTER + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) ); assertThat( service.stats(), @@ -163,7 +168,8 @@ public void testUndesiredShardCount() { threadPool, clusterService, (innerState, strategy) -> innerState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public DesiredBalance getDesiredBalance() { @@ -176,7 +182,7 @@ public DesiredBalance getDesiredBalance() { ); } }, - TEST_WRITE_LOAD_FORECASTER + new NodeAllocationStatsProvider(TEST_WRITE_LOAD_FORECASTER) ); assertThat( service.stats(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java index 44f3b7d1d3a11..c5ae771199541 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterAllocationSimulationTests.java @@ -490,7 +490,8 @@ private Map.Entry createNewAllocationSer clusterService, (clusterState, routingAllocationAction) -> strategyRef.get() .executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationAction), - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public void allocate(RoutingAllocation allocation, ActionListener listener) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java index 85dc5c9dcd6a9..9e6e080f38216 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetricsTests.java @@ -27,7 +27,7 @@ public void testZeroAllMetrics() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomNonNegativeLong(); long undesiredAllocations = randomNonNegativeLong(); - metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of(), Map.of()); assertEquals(totalAllocations, metrics.totalAllocations()); assertEquals(unassignedShards, metrics.unassignedShards()); assertEquals(undesiredAllocations, metrics.undesiredAllocations()); @@ -44,7 +44,7 @@ public void testMetricsAreOnlyPublishedWhenNodeIsMaster() { long unassignedShards = randomNonNegativeLong(); long totalAllocations = randomLongBetween(100, 10000000); long undesiredAllocations = randomLongBetween(0, totalAllocations); - metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, totalAllocations, undesiredAllocations), Map.of(), Map.of()); // Collect when not master meterRegistry.getRecorder().collect(); @@ -104,7 +104,7 @@ public void testUndesiredAllocationRatioIsZeroWhenTotalShardsIsZero() { RecordingMeterRegistry meterRegistry = new RecordingMeterRegistry(); 
DesiredBalanceMetrics metrics = new DesiredBalanceMetrics(meterRegistry); long unassignedShards = randomNonNegativeLong(); - metrics.updateMetrics(new AllocationStats(unassignedShards, 0, 0), Map.of()); + metrics.updateMetrics(new AllocationStats(unassignedShards, 0, 0), Map.of(), Map.of()); metrics.setNodeIsMaster(true); meterRegistry.getRecorder().collect(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index b5f44ee9e505f..54f4f0ffb6e15 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -1214,7 +1214,8 @@ public void testRebalanceDoesNotCauseHotSpots() { var reconciler = new DesiredBalanceReconciler( clusterSettings, new DeterministicTaskQueue().getThreadPool(), - DesiredBalanceMetrics.NOOP + DesiredBalanceMetrics.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var totalOutgoingMoves = new HashMap(); @@ -1296,7 +1297,12 @@ public void testShouldLogOnTooManyUndesiredAllocations() { final var timeInMillisSupplier = new AtomicLong(); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(timeInMillisSupplier::incrementAndGet); - var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP); + var reconciler = new DesiredBalanceReconciler( + createBuiltInClusterSettings(), + threadPool, + DesiredBalanceMetrics.NOOP, + EMPTY_NODE_ALLOCATION_STATS + ); final long initialDelayInMillis = TimeValue.timeValueMinutes(5).getMillis(); timeInMillisSupplier.addAndGet(randomLongBetween(initialDelayInMillis, 2 * initialDelayInMillis)); @@ -1348,10 +1354,8 @@ public void testShouldLogOnTooManyUndesiredAllocations() { private static void reconcile(RoutingAllocation routingAllocation, DesiredBalance desiredBalance) { final var threadPool = mock(ThreadPool.class); when(threadPool.relativeTimeInMillisSupplier()).thenReturn(new AtomicLong()::incrementAndGet); - new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP).reconcile( - desiredBalance, - routingAllocation - ); + new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool, DesiredBalanceMetrics.NOOP, EMPTY_NODE_ALLOCATION_STATS) + .reconcile(desiredBalance, routingAllocation); } private static boolean isReconciled(RoutingNode node, DesiredBalance balance) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 2cb3204787ce1..61962c4e8cca7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -168,7 +168,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo threadPool, clusterService, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); assertValidStats(desiredBalanceShardsAllocator.getStats()); var allocationService = createAllocationService(desiredBalanceShardsAllocator, 
createGatewayAllocator(allocateUnassigned)); @@ -295,7 +296,8 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo threadPool, clusterService, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = new AllocationService( new AllocationDeciders(List.of()), @@ -413,7 +415,8 @@ boolean hasEnoughIterations(int currentIteration) { } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -540,7 +543,8 @@ public DesiredBalance compute( } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); allocationServiceRef.set(allocationService); @@ -643,7 +647,8 @@ public DesiredBalance compute( } }, reconcileAction, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var allocationService = createAllocationService(desiredBalanceShardsAllocator, gatewayAllocator); @@ -734,7 +739,8 @@ public DesiredBalance compute( clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -791,7 +797,8 @@ public void testResetDesiredBalanceOnNoLongerMaster() { clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ); var service = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator()); @@ -844,7 +851,8 @@ public void testResetDesiredBalanceOnNodeShutdown() { clusterService, desiredBalanceComputer, (reconcilerClusterState, rerouteStrategy) -> reconcilerClusterState, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { @Override public void resetDesiredBalance() { diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index a1718e956800c..a041efc9ad3f1 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -24,6 +24,8 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedShard; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStats; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsProvider; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -37,6 +39,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import 
org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.gateway.GatewayAllocator; @@ -165,7 +168,8 @@ private static DesiredBalanceShardsAllocator createDesiredBalanceShardsAllocator queue.getThreadPool(), clusterService, null, - TelemetryProvider.NOOP + TelemetryProvider.NOOP, + EMPTY_NODE_ALLOCATION_STATS ) { private RoutingAllocation lastAllocation; @@ -432,4 +436,17 @@ public void allocateUnassigned( } } } + + protected static final NodeAllocationStatsProvider EMPTY_NODE_ALLOCATION_STATS = new NodeAllocationStatsProvider( + WriteLoadForecaster.DEFAULT + ) { + @Override + public Map stats( + ClusterState clusterState, + ClusterInfo clusterInfo, + @Nullable DesiredBalance desiredBalance + ) { + return Map.of(); + } + }; } From 65de0f0ca90b4c4aa5706aa92169929a5cfff4cc Mon Sep 17 00:00:00 2001 From: Ying Mao Date: Tue, 12 Nov 2024 11:22:55 -0500 Subject: [PATCH 62/95] Hides `hugging_face_elser` service from the `GET _inference/_services API` (#116664) * Adding hideFromConfigurationApi flag * Update docs/changelog/116664.yaml --- docs/changelog/116664.yaml | 6 ++++++ .../inference/InferenceService.java | 8 ++++++++ .../xpack/inference/InferenceCrudIT.java | 13 +++++-------- .../TransportGetInferenceServicesAction.java | 19 ++++++++++++------- .../elser/HuggingFaceElserService.java | 5 +++++ 5 files changed, 36 insertions(+), 15 deletions(-) create mode 100644 docs/changelog/116664.yaml diff --git a/docs/changelog/116664.yaml b/docs/changelog/116664.yaml new file mode 100644 index 0000000000000..36915fca39731 --- /dev/null +++ b/docs/changelog/116664.yaml @@ -0,0 +1,6 @@ +pr: 116664 +summary: Hides `hugging_face_elser` service from the `GET _inference/_services API` +area: Machine Learning +type: bug +issues: + - 116644 diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index cd92f38e65152..f7b688ba37963 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -74,6 +74,14 @@ default void init(Client client) {} InferenceServiceConfiguration getConfiguration(); + /** + * Whether this service should be hidden from the API. Should be used for services + * that are not ready to be used. + */ + default Boolean hideFromConfigurationApi() { + return Boolean.FALSE; + } + /** * The task types supported by the service * @return Set of supported. 
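[Editorial aside.] The mechanism in the hunk above is deliberately small: a default method on the service interface that a concrete service overrides to opt out of discovery, plus a stream filter in the transport action (both shown in the diffs that follow). A self-contained toy of the same pattern, simplified to a primitive boolean where the real method returns a boxed Boolean:

import java.util.Map;
import java.util.stream.Collectors;

public class ServiceVisibilityDemo {
    interface Service {
        // Opt-out hook: hidden services are skipped by the configuration listing.
        default boolean hideFromConfigurationApi() {
            return false;
        }
    }

    public static void main(String[] args) {
        Map<String, Service> services = Map.of(
            "hugging_face", new Service() {},
            "hugging_face_elser", new Service() {
                @Override
                public boolean hideFromConfigurationApi() {
                    return true;
                }
            }
        );
        var visible = services.entrySet()
            .stream()
            .filter(e -> e.getValue().hideFromConfigurationApi() == false)
            .map(Map.Entry::getKey)
            .collect(Collectors.toSet());
        System.out.println(visible); // prints [hugging_face]
    }
}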
diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java
index f9a1318cd9740..081c83b1e7067 100644
--- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java
+++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java
@@ -135,9 +135,9 @@ public void testApisWithoutTaskType() throws IOException {
     public void testGetServicesWithoutTaskType() throws IOException {
         List services = getAllServices();
         if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) {
-            assertThat(services.size(), equalTo(19));
-        } else {
             assertThat(services.size(), equalTo(18));
+        } else {
+            assertThat(services.size(), equalTo(17));
         }

         String[] providers = new String[services.size()];
@@ -160,7 +160,6 @@ public void testGetServicesWithoutTaskType() throws IOException {
             "googleaistudio",
             "googlevertexai",
             "hugging_face",
-            "hugging_face_elser",
             "mistral",
             "openai",
             "streaming_completion_test_service",
@@ -259,9 +258,9 @@ public void testGetServicesWithSparseEmbeddingTaskType() throws IOException {
         List services = getServices(TaskType.SPARSE_EMBEDDING);
         if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) {
-            assertThat(services.size(), equalTo(6));
-        } else {
             assertThat(services.size(), equalTo(5));
+        } else {
+            assertThat(services.size(), equalTo(4));
         }

         String[] providers = new String[services.size()];
@@ -272,9 +271,7 @@
         Arrays.sort(providers);

-        var providerList = new ArrayList<>(
-            Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "hugging_face_elser", "test_service")
-        );
+        var providerList = new ArrayList<>(Arrays.asList("alibabacloud-ai-search", "elasticsearch", "hugging_face", "test_service"));
         if (ElasticInferenceServiceFeature.ELASTIC_INFERENCE_SERVICE_FEATURE_FLAG.isEnabled()) {
             providerList.add(1, "elastic");
         }
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java
index a6109bfe659d7..002b2b0fe93b0 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceServicesAction.java
@@ -68,7 +68,10 @@ private void getServiceConfigurationsForTaskType(
         var filteredServices = serviceRegistry.getServices()
             .entrySet()
             .stream()
-            .filter(service -> service.getValue().supportedTaskTypes().contains(requestedTaskType))
+            .filter(
+                service -> service.getValue().hideFromConfigurationApi() == false
+                    && service.getValue().supportedTaskTypes().contains(requestedTaskType)
+            )
             .collect(Collectors.toSet());

         getServiceConfigurationsForServices(filteredServices, listener.delegateFailureAndWrap((delegate, configurations) -> {
@@ -77,12 +80,14 @@ private void getServiceConfigurationsForTaskType(
     }

     private void getAllServiceConfigurations(ActionListener listener) {
-        getServiceConfigurationsForServices(
-            serviceRegistry.getServices().entrySet(),
-            listener.delegateFailureAndWrap((delegate, configurations) -> {
-                delegate.onResponse(new GetInferenceServicesAction.Response(configurations));
-            })
-        );
+        var availableServices = serviceRegistry.getServices()
+            .entrySet()
+            .stream()
+            .filter(service -> service.getValue().hideFromConfigurationApi() == false)
+            .collect(Collectors.toSet());
+        getServiceConfigurationsForServices(availableServices, listener.delegateFailureAndWrap((delegate, configurations) -> {
+            delegate.onResponse(new GetInferenceServicesAction.Response(configurations));
+        }));
     }

     private void getServiceConfigurationsForServices(
diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java
index e0afbf924f654..a2e22e24172cf 100644
--- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java
+++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/huggingface/elser/HuggingFaceElserService.java
@@ -125,6 +125,11 @@ public InferenceServiceConfiguration getConfiguration() {
         return Configuration.get();
     }

+    @Override
+    public Boolean hideFromConfigurationApi() {
+        return Boolean.TRUE;
+    }
+
     @Override
     public EnumSet supportedTaskTypes() {
         return supportedTaskTypes;

From 55450fe11274a38d80683d81a2367b8053397a4f Mon Sep 17 00:00:00 2001
From: Pooya Salehi
Date: Tue, 12 Nov 2024 17:24:24 +0100
Subject: [PATCH 63/95] Use a time supplier interface instead of passing ThreadPool (#116333)

An attempt to use a basic interface for time supplier based on
https://github.com/elastic/elasticsearch/pull/115511#discussion_r1816300609.
(TLDR: sometimes we pass around a ThreadPool instance just to be able to get time.
It might be more reasonable to separate those use cases)
---
 .../allocator/DesiredBalanceComputer.java     | 12 ++--
 .../DesiredBalanceShardsAllocator.java        |  2 +-
 .../common/time/TimeProvider.java             | 55 +++++++++++++++++++
 .../elasticsearch/threadpool/ThreadPool.java  | 36 ++----------
 ...nsportDeleteDesiredBalanceActionTests.java |  2 +-
 .../DesiredBalanceComputerTests.java          | 54 +++++++++---------
 .../DesiredBalanceShardsAllocatorTests.java   | 21 +++----
 .../common/time/TimeProviderUtils.java        | 45 +++++++++++++++
 8 files changed, 148 insertions(+), 79 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/common/time/TimeProvider.java
 create mode 100644 server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java

diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
index 682dc85ccd00f..3b22221ea7db4 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.common.metrics.MeanMetric;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.time.TimeProvider;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.core.TimeValue;
@@ -37,7 +38,6 @@
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
-import java.util.function.LongSupplier;
 import java.util.function.Predicate;

 import static java.util.stream.Collectors.toUnmodifiableSet;
@@ -50,7 +50,7 @@ public class DesiredBalanceComputer {
     private static final Logger logger = LogManager.getLogger(DesiredBalanceComputer.class);

     private final ShardsAllocator delegateAllocator;
-    private final LongSupplier timeSupplierMillis;
+    private final TimeProvider timeProvider;

     // stats
     protected final MeanMetric iterations = new MeanMetric();
@@ -73,9 +73,9 @@ public class DesiredBalanceComputer {
     private TimeValue progressLogInterval;
     private long maxBalanceComputationTimeDuringIndexCreationMillis;

-    public DesiredBalanceComputer(ClusterSettings clusterSettings, LongSupplier timeSupplierMillis, ShardsAllocator delegateAllocator) {
+    public DesiredBalanceComputer(ClusterSettings clusterSettings, TimeProvider timeProvider, ShardsAllocator delegateAllocator) {
         this.delegateAllocator = delegateAllocator;
-        this.timeSupplierMillis = timeSupplierMillis;
+        this.timeProvider = timeProvider;
         clusterSettings.initializeAndWatch(PROGRESS_LOG_INTERVAL_SETTING, value -> this.progressLogInterval = value);
         clusterSettings.initializeAndWatch(
             MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING,
@@ -275,7 +275,7 @@ public DesiredBalance compute(
         final int iterationCountReportInterval = computeIterationCountReportInterval(routingAllocation);
         final long timeWarningInterval = progressLogInterval.millis();
-        final long computationStartedTime = timeSupplierMillis.getAsLong();
+        final long computationStartedTime = timeProvider.relativeTimeInMillis();
         long nextReportTime = computationStartedTime + timeWarningInterval;

         int i = 0;
@@ -323,7 +323,7 @@ public DesiredBalance compute(
             i++;

             final int iterations = i;
-            final long currentTime = timeSupplierMillis.getAsLong();
+            final long currentTime = timeProvider.relativeTimeInMillis();
             final boolean reportByTime = nextReportTime <= currentTime;
             final boolean reportByIterationCount = i % iterationCountReportInterval == 0;
             if (reportByTime || reportByIterationCount) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
index 5597eb47e765b..bfe8a20f18043 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java
@@ -93,7 +93,7 @@ public DesiredBalanceShardsAllocator(
             delegateAllocator,
             threadPool,
             clusterService,
-            new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator),
+            new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator),
             reconciler,
             telemetryProvider,
             nodeAllocationStatsProvider
diff --git a/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java b/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java
new file mode 100644
index 0000000000000..8b29d23397383
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/common/time/TimeProvider.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.common.time;
+
+/**
+ * An interface encapsulating the different methods for getting relative and absolute time. The main
+ * implementation of this is {@link org.elasticsearch.threadpool.ThreadPool}. To make it clear that a
+ * {@code ThreadPool} is being passed around only to get time, it is preferred to use this interface.
+ */
+public interface TimeProvider {
+
+    /**
+     * Returns a value of milliseconds that may be used for relative time calculations.
+     *
+     * This method should only be used for calculating time deltas. For an epoch based
+     * timestamp, see {@link #absoluteTimeInMillis()}.
+     */
+    long relativeTimeInMillis();
+
+    /**
+     * Returns a value of nanoseconds that may be used for relative time calculations.
+     *
+     * This method should only be used for calculating time deltas. For an epoch based
+     * timestamp, see {@link #absoluteTimeInMillis()}.
+     */
+    long relativeTimeInNanos();
+
+    /**
+     * Returns a value of milliseconds that may be used for relative time calculations. Similar to {@link #relativeTimeInMillis()} except
+     * that this method is more expensive: the return value is computed directly from {@link System#nanoTime} and is not cached. You should
+     * use {@link #relativeTimeInMillis()} unless the extra accuracy offered by this method is worth the costs.
+     *
+     * When computing a time interval by comparing relative times in milliseconds, you should make sure that both endpoints use cached
+     * values returned from {@link #relativeTimeInMillis()} or that they both use raw values returned from this method. It doesn't really
+     * make sense to compare a raw value to a cached value, even if in practice the result of such a comparison will be approximately
+     * sensible.
+     */
+    long rawRelativeTimeInMillis();
+
+    /**
+     * Returns the value of milliseconds since UNIX epoch.
+     *
+     * This method should only be used for exact date/time formatting. For calculating
+     * time deltas that should not suffer from negative deltas, which are possible with
+     * this method, see {@link #relativeTimeInMillis()}.
+     */
+    long absoluteTimeInMillis();
+}
diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index 0155ab34ae637..f55e3740aaa8f 100644
--- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.time.TimeProvider;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.SizeValue;
@@ -65,7 +66,7 @@
  * Manages all the Java thread pools we create. {@link Names} contains a list of the thread pools, but plugins can dynamically add more
  * thread pools to instantiate.
  */
-public class ThreadPool implements ReportingService, Scheduler {
+public class ThreadPool implements ReportingService, Scheduler, TimeProvider {

     private static final Logger logger = LogManager.getLogger(ThreadPool.class);
@@ -362,12 +363,7 @@ protected ThreadPool() {
         this.scheduler = null;
     }

-    /**
-     * Returns a value of milliseconds that may be used for relative time calculations.
-     *
-     * This method should only be used for calculating time deltas. For an epoch based
-     * timestamp, see {@link #absoluteTimeInMillis()}.
-     */
+    @Override
     public long relativeTimeInMillis() {
         return cachedTimeThread.relativeTimeInMillis();
     }
@@ -379,37 +375,17 @@ public LongSupplier relativeTimeInMillisSupplier() {
         return relativeTimeInMillisSupplier;
     }

-    /**
-     * Returns a value of nanoseconds that may be used for relative time calculations.
-     *
-     * This method should only be used for calculating time deltas. For an epoch based
-     * timestamp, see {@link #absoluteTimeInMillis()}.
-     */
+    @Override
     public long relativeTimeInNanos() {
         return cachedTimeThread.relativeTimeInNanos();
     }

-    /**
-     * Returns a value of milliseconds that may be used for relative time calculations. Similar to {@link #relativeTimeInMillis()} except
-     * that this method is more expensive: the return value is computed directly from {@link System#nanoTime} and is not cached. You should
-     * use {@link #relativeTimeInMillis()} unless the extra accuracy offered by this method is worth the costs.
-     *
-     * When computing a time interval by comparing relative times in milliseconds, you should make sure that both endpoints use cached
-     * values returned from {@link #relativeTimeInMillis()} or that they both use raw values returned from this method. It doesn't really
-     * make sense to compare a raw value to a cached value, even if in practice the result of such a comparison will be approximately
-     * sensible.
-     */
+    @Override
     public long rawRelativeTimeInMillis() {
         return TimeValue.nsecToMSec(System.nanoTime());
     }

-    /**
-     * Returns the value of milliseconds since UNIX epoch.
-     *
-     * This method should only be used for exact date/time formatting. For calculating
-     * time deltas that should not suffer from negative deltas, which are possible with
-     * this method, see {@link #relativeTimeInMillis()}.
-     */
+    @Override
     public long absoluteTimeInMillis() {
         return cachedTimeThread.absoluteTimeInMillis();
     }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java
index 8ea8b24baf6d5..3dafc8f000f3f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java
@@ -101,7 +101,7 @@ public void testDeleteDesiredBalance() throws Exception {
         var clusterSettings = ClusterSettings.createBuiltInClusterSettings(settings);

         var delegate = new BalancedShardsAllocator();
-        var computer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegate) {
+        var computer = new DesiredBalanceComputer(clusterSettings, threadPool, delegate) {

             final AtomicReference lastComputationInput = new AtomicReference<>();

diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java
index 51401acabb0ac..7b77947792bd4 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java
@@ -42,6 +42,8 @@
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.time.TimeProvider;
+import org.elasticsearch.common.time.TimeProviderUtils;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.index.IndexVersion;
@@ -1203,42 +1205,40 @@ public void testShouldLogComputationIteration() {
     private void checkIterationLogging(int iterations, long eachIterationDuration, MockLog.AbstractEventExpectation expectation) {

         var currentTime = new AtomicLong(0L);
+        TimeProvider timeProvider = TimeProviderUtils.create(() -> currentTime.addAndGet(eachIterationDuration));
+
+        // Some runs of this test try to simulate a long desired balance computation. Setting a high value on the following setting
+        // prevents interrupting a long computation.
         var clusterSettings = createBuiltInClusterSettings(
             Settings.builder().put(DesiredBalanceComputer.MAX_BALANCE_COMPUTATION_TIME_DURING_INDEX_CREATION_SETTING.getKey(), "2m").build()
         );
-        var desiredBalanceComputer = new DesiredBalanceComputer(
-            clusterSettings,
-            () -> currentTime.addAndGet(eachIterationDuration),
-            new ShardsAllocator() {
-                @Override
-                public void allocate(RoutingAllocation allocation) {
-                    final var unassignedIterator = allocation.routingNodes().unassigned().iterator();
-                    while (unassignedIterator.hasNext()) {
-                        final var shardRouting = unassignedIterator.next();
-                        if (shardRouting.primary()) {
-                            unassignedIterator.initialize("node-0", null, 0L, allocation.changes());
-                        } else {
-                            unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
-                        }
-                    }
-
-                    // move shard on each iteration
-                    for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) {
-                        allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes());
-                    }
-                    for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) {
-                        allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes());
+        var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, timeProvider, new ShardsAllocator() {
+            @Override
+            public void allocate(RoutingAllocation allocation) {
+                final var unassignedIterator = allocation.routingNodes().unassigned().iterator();
+                while (unassignedIterator.hasNext()) {
+                    final var shardRouting = unassignedIterator.next();
+                    if (shardRouting.primary()) {
+                        unassignedIterator.initialize("node-0", null, 0L, allocation.changes());
+                    } else {
+                        unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
                     }
                 }

-                @Override
-                public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
-                    throw new AssertionError("only used for allocation explain");
+                // move shard on each iteration
+                for (var shard : allocation.routingNodes().node("node-0").shardsWithState(STARTED).toList()) {
+                    allocation.routingNodes().relocateShard(shard, "node-1", 0L, "test", allocation.changes());
+                }
+                for (var shard : allocation.routingNodes().node("node-1").shardsWithState(STARTED).toList()) {
+                    allocation.routingNodes().relocateShard(shard, "node-0", 0L, "test", allocation.changes());
                 }
             }
-        );
+
+            @Override
+            public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
+                throw new AssertionError("only used for allocation explain");
+            }
+        });

         assertThatLogger(() -> {
             var iteration = new AtomicInteger(0);
@@ -1346,7 +1346,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing
     }

     private static DesiredBalanceComputer createDesiredBalanceComputer(ShardsAllocator allocator) {
-        return new DesiredBalanceComputer(createBuiltInClusterSettings(), () -> 0L, allocator);
+        return new DesiredBalanceComputer(createBuiltInClusterSettings(), TimeProviderUtils.create(() -> 0L), allocator);
     }

     private static void assertDesiredAssignments(DesiredBalance desiredBalance, Map expected) {
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java
index 61962c4e8cca7..b18e2c0cd2647 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java
@@ -44,6 +44,7 @@
 import org.elasticsearch.cluster.service.FakeThreadPoolMasterService;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.time.TimeProviderUtils;
 import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue;
 import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
 import org.elasticsearch.core.TimeValue;
@@ -398,7 +399,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing
             shardsAllocator,
             threadPool,
             clusterService,
-            new DesiredBalanceComputer(clusterSettings, time::get, shardsAllocator) {
+            new DesiredBalanceComputer(clusterSettings, TimeProviderUtils.create(time::get), shardsAllocator) {
                 @Override
                 public DesiredBalance compute(
                     DesiredBalance previousDesiredBalance,
@@ -525,7 +526,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo
             shardsAllocator,
             threadPool,
             clusterService,
-            new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) {
+            new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) {
                 @Override
                 public DesiredBalance compute(
                     DesiredBalance previousDesiredBalance,
@@ -629,7 +630,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo
             shardsAllocator,
             threadPool,
             clusterService,
-            new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, shardsAllocator) {
+            new DesiredBalanceComputer(clusterSettings, threadPool, shardsAllocator) {
                 @Override
                 public DesiredBalance compute(
                     DesiredBalance previousDesiredBalance,
@@ -717,7 +718,7 @@ public void testResetDesiredBalance() {
         var delegateAllocator = createShardsAllocator();
         var clusterSettings = createBuiltInClusterSettings();
-        var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool::relativeTimeInMillis, delegateAllocator) {
+        var desiredBalanceComputer = new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator) {

             final AtomicReference lastComputationInput = new AtomicReference<>();

@@ -786,11 +787,7 @@ public void testResetDesiredBalanceOnNoLongerMaster() {
         var clusterService = ClusterServiceUtils.createClusterService(clusterState, threadPool);

         var delegateAllocator = createShardsAllocator();
-        var desiredBalanceComputer = new DesiredBalanceComputer(
-            createBuiltInClusterSettings(),
-            threadPool::relativeTimeInMillis,
-            delegateAllocator
-        );
+        var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator);
         var desiredBalanceShardsAllocator = new DesiredBalanceShardsAllocator(
             delegateAllocator,
             threadPool,
@@ -840,11 +837,7 @@ public void testResetDesiredBalanceOnNodeShutdown() {
         final var resetCalled = new AtomicBoolean();

         var delegateAllocator = createShardsAllocator();
-        var desiredBalanceComputer = new DesiredBalanceComputer(
-            createBuiltInClusterSettings(),
-            threadPool::relativeTimeInMillis,
-            delegateAllocator
-        );
+        var desiredBalanceComputer = new DesiredBalanceComputer(createBuiltInClusterSettings(), threadPool, delegateAllocator);
         var desiredBalanceAllocator = new DesiredBalanceShardsAllocator(
             delegateAllocator,
             threadPool,
diff --git a/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java b/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java
new file mode 100644
index 0000000000000..a3c5c105eb34a
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/common/time/TimeProviderUtils.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.common.time;
+
+import org.elasticsearch.core.TimeValue;
+
+import java.util.function.LongSupplier;
+
+public class TimeProviderUtils {
+
+    /**
+     * Creates a TimeProvider implementation for tests that uses the same source for
+     * all methods (regardless of relative or absolute time).
+     */
+    public static TimeProvider create(LongSupplier timeSourceInMillis) {
+        return new TimeProvider() {
+            @Override
+            public long relativeTimeInMillis() {
+                return timeSourceInMillis.getAsLong();
+            }
+
+            @Override
+            public long relativeTimeInNanos() {
+                return timeSourceInMillis.getAsLong() * TimeValue.NSEC_PER_MSEC;
+            }
+
+            @Override
+            public long rawRelativeTimeInMillis() {
+                return timeSourceInMillis.getAsLong();
+            }
+
+            @Override
+            public long absoluteTimeInMillis() {
+                return timeSourceInMillis.getAsLong();
+            }
+        };
+    }
+}

From 56bc6fda6f342a435837f0eef378fd96f70cab5a Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
Date: Tue, 12 Nov 2024 16:47:20 +0000
Subject: [PATCH 64/95] Bump versions after 8.16.0 release

---
 .buildkite/pipelines/intake.yml                   |  2 +-
 .buildkite/pipelines/periodic-packaging.yml       | 12 ++++++------
 .buildkite/pipelines/periodic.yml                 | 16 ++++++++--------
 .ci/bwcVersions                                   |  4 ++--
 .ci/snapshotBwcVersions                           |  3 +--
 .../src/main/java/org/elasticsearch/Version.java  |  2 +-
 .../org/elasticsearch/TransportVersions.csv       |  1 +
 .../org/elasticsearch/index/IndexVersions.csv     |  1 +
 8 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 167830d3ed8b3..19e99852869e6 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -56,7 +56,7 @@ steps:
     timeout_in_minutes: 300
     matrix:
       setup:
-        BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"]
+        BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"]
     agents:
       provider: gcp
      image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index 0f2e70addd684..7dd8269f4ffe6 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -272,8 +272,8 @@ steps:
     env:
       BWC_VERSION: 8.14.3

-  - label: "{{matrix.image}} / 8.15.5 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.5
+  - label: "{{matrix.image}} / 8.15.4 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.4
     timeout_in_minutes: 300
     matrix:
       setup:
@@ -286,10 +286,10 @@ steps:
       machineType: custom-16-32768
       buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.15.5
+      BWC_VERSION: 8.15.4

-  - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0
+  - label: "{{matrix.image}} / 8.16.1 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.1
     timeout_in_minutes: 300
     matrix:
       setup:
@@ -302,7 +302,7 @@ steps:
       machineType: custom-16-32768
       buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.16.0
+      BWC_VERSION: 8.16.1

   - label: "{{matrix.image}} / 8.17.0 / packaging-tests-upgrade"
     command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.17.0
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index f68f64332426c..79371d6ddccf5 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -287,8 +287,8 @@ steps:
           - signal_reason: agent_stop
             limit: 3

-  - label: 8.15.5 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.5#bwcTest
+  - label: 8.15.4 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.4#bwcTest
     timeout_in_minutes: 300
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
       machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     preemptible: true
     env:
-      BWC_VERSION: 8.15.5
+      BWC_VERSION: 8.15.4
     retry:
       automatic:
         - exit_status: "-1"
          limit: 3
         - signal_reason: agent_stop
           limit: 3

-  - label: 8.16.0 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.0#bwcTest
+  - label: 8.16.1 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.16.1#bwcTest
     timeout_in_minutes: 300
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
       machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     preemptible: true
     env:
-      BWC_VERSION: 8.16.0
+      BWC_VERSION: 8.16.1
     retry:
       automatic:
         - exit_status: "-1"
           limit: 3
         - signal_reason: agent_stop
           limit: 3
@@ -429,7 +429,7 @@ steps:
       setup:
        ES_RUNTIME_JAVA:
          - openjdk21
-        BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"]
+        BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
@@ -471,7 +471,7 @@ steps:
        ES_RUNTIME_JAVA:
          - openjdk21
          - openjdk23
-        BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"]
+        BWC_VERSION: ["8.16.1", "8.17.0", "9.0.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index b4a4460ff5a80..85522e47a523f 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -14,7 +14,7 @@ BWC_VERSION:
   - "8.12.2"
   - "8.13.4"
   - "8.14.3"
-  - "8.15.5"
-  - "8.16.0"
+  - "8.15.4"
+  - "8.16.1"
   - "8.17.0"
   - "9.0.0"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index 7dad55b653925..9ea3072021bb3 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,5 +1,4 @@ BWC_VERSION:
-  - "8.15.5"
-  - "8.16.0"
+  - "8.16.1"
   - "8.17.0"
   - "9.0.0"
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index 909d733fd3719..7791ca200a785 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -187,8 +187,8 @@ public class Version implements VersionId, ToXContentFragment {
     public static final Version V_8_15_2 = new Version(8_15_02_99);
     public static final Version V_8_15_3 = new Version(8_15_03_99);
     public static final Version V_8_15_4 = new Version(8_15_04_99);
-    public static final Version V_8_15_5 = new Version(8_15_05_99);
     public static final Version V_8_16_0 = new Version(8_16_00_99);
+    public static final Version V_8_16_1 = new Version(8_16_01_99);
     public static final Version V_8_17_0 = new Version(8_17_00_99);
     public static final Version V_9_0_0 = new Version(9_00_00_99);
     public static final Version CURRENT = V_9_0_0;
diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
index 26c518962c19a..ba575cc642a81 100644
--- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
@@ -132,3 +132,4 @@
 8.15.2,8702003
 8.15.3,8702003
 8.15.4,8702003
+8.16.0,8772001
diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
index 6cab0b513ee63..c54aea88613f5 100644
--- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
@@ -132,3 +132,4 @@
 8.15.2,8512000
 8.15.3,8512000
 8.15.4,8512000
+8.16.0,8518000

From af12d888ecf7df37efce78db43bf56454da15e91 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
Date: Tue, 12 Nov 2024 16:48:35 +0000
Subject: [PATCH 65/95] Prune changelogs after 8.16.0 release

---
 docs/changelog/106520.yaml |  6 ------
 docs/changelog/107047.yaml |  6 ------
 docs/changelog/107936.yaml |  6 ------
 docs/changelog/109017.yaml |  6 ------
 docs/changelog/109193.yaml |  6 ------
 docs/changelog/109414.yaml |  6 ------
 docs/changelog/109583.yaml | 29 -----------------------------
 docs/changelog/109667.yaml |  5 -----
 docs/changelog/109684.yaml |  5 -----
 docs/changelog/110021.yaml |  6 ------
 docs/changelog/110116.yaml |  5 -----
 docs/changelog/110216.yaml |  5 -----
 docs/changelog/110237.yaml |  7 -------
 docs/changelog/110399.yaml |  6 ------
 docs/changelog/110427.yaml |  6 ------
 docs/changelog/110520.yaml |  5 -----
 docs/changelog/110524.yaml |  5 -----
 docs/changelog/110527.yaml |  5 -----
 docs/changelog/110554.yaml |  5 -----
 docs/changelog/110574.yaml |  6 ------
 docs/changelog/110578.yaml |  5 -----
 docs/changelog/110593.yaml |  6 ------
 docs/changelog/110603.yaml |  6 ------
 docs/changelog/110606.yaml |  5 -----
 docs/changelog/110630.yaml |  5 -----
 docs/changelog/110633.yaml |  5 -----
 docs/changelog/110669.yaml |  6 ------
 docs/changelog/110676.yaml |  5 -----
 docs/changelog/110677.yaml |  5 -----
 docs/changelog/110718.yaml |  5 -----
 docs/changelog/110734.yaml |  5 -----
 docs/changelog/110796.yaml |  5 -----
 docs/changelog/110816.yaml |  6 ------
 docs/changelog/110829.yaml | 10 ----------
 docs/changelog/110833.yaml |  5 -----
 docs/changelog/110846.yaml |  5 -----
 docs/changelog/110847.yaml |  5 -----
 docs/changelog/110860.yaml |  5 -----
 docs/changelog/110879.yaml |  5 -----
 docs/changelog/110901.yaml | 15 ---------------
 docs/changelog/110921.yaml |  5 -----
 docs/changelog/110928.yaml |  5 -----
 docs/changelog/110951.yaml |  5 -----
 docs/changelog/110971.yaml |  5 -----
 docs/changelog/110974.yaml |  5 -----
 docs/changelog/110986.yaml |  6 ------
 docs/changelog/110993.yaml |  5 -----
 docs/changelog/111015.yaml | 15 ---------------
 docs/changelog/111064.yaml |  6 ------
 docs/changelog/111071.yaml |  5 -----
 docs/changelog/111079.yaml |  5 -----
 docs/changelog/111091.yaml |  5 -----
 docs/changelog/111105.yaml |  5 -----
 docs/changelog/111118.yaml |  5 -----
 docs/changelog/111123.yaml |  5 -----
 docs/changelog/111154.yaml |  5 -----
 docs/changelog/111161.yaml |  6 ------
 docs/changelog/111181.yaml |  5 -----
 docs/changelog/111193.yaml |  6 ------
 docs/changelog/111212.yaml |  6 ------
 docs/changelog/111215.yaml |  6 ------
 docs/changelog/111225.yaml |  5 -----
docs/changelog/111226.yaml | 5 ----- docs/changelog/111238.yaml | 6 ------ docs/changelog/111245.yaml | 6 ------ docs/changelog/111274.yaml | 5 ----- docs/changelog/111284.yaml | 6 ------ docs/changelog/111311.yaml | 6 ------ docs/changelog/111315.yaml | 5 ----- docs/changelog/111316.yaml | 5 ----- docs/changelog/111336.yaml | 5 ----- docs/changelog/111344.yaml | 5 ----- docs/changelog/111367.yaml | 5 ----- docs/changelog/111412.yaml | 6 ------ docs/changelog/111413.yaml | 6 ------ docs/changelog/111420.yaml | 5 ----- docs/changelog/111437.yaml | 5 ----- docs/changelog/111445.yaml | 5 ----- docs/changelog/111457.yaml | 6 ------ docs/changelog/111465.yaml | 5 ----- docs/changelog/111490.yaml | 5 ----- docs/changelog/111501.yaml | 6 ------ docs/changelog/111516.yaml | 5 ----- docs/changelog/111523.yaml | 5 ----- docs/changelog/111544.yaml | 5 ----- docs/changelog/111552.yaml | 5 ----- docs/changelog/111576.yaml | 6 ------ docs/changelog/111600.yaml | 5 ----- docs/changelog/111624.yaml | 6 ------ docs/changelog/111644.yaml | 6 ------ docs/changelog/111655.yaml | 5 ----- docs/changelog/111683.yaml | 6 ------ docs/changelog/111689.yaml | 6 ------ docs/changelog/111690.yaml | 5 ----- docs/changelog/111740.yaml | 6 ------ docs/changelog/111749.yaml | 6 ------ docs/changelog/111770.yaml | 5 ----- docs/changelog/111779.yaml | 7 ------- docs/changelog/111797.yaml | 6 ------ docs/changelog/111809.yaml | 5 ----- docs/changelog/111818.yaml | 5 ----- docs/changelog/111840.yaml | 5 ----- docs/changelog/111855.yaml | 5 ----- docs/changelog/111874.yaml | 8 -------- docs/changelog/111879.yaml | 6 ------ docs/changelog/111915.yaml | 6 ------ docs/changelog/111917.yaml | 7 ------- docs/changelog/111937.yaml | 6 ------ docs/changelog/111948.yaml | 5 ----- docs/changelog/111950.yaml | 6 ------ docs/changelog/111955.yaml | 7 ------- docs/changelog/111968.yaml | 6 ------ docs/changelog/111969.yaml | 5 ----- docs/changelog/111972.yaml | 17 ----------------- docs/changelog/111981.yaml | 6 ------ docs/changelog/112019.yaml | 5 ----- docs/changelog/112024.yaml | 5 ----- docs/changelog/112026.yaml | 5 ----- docs/changelog/112055.yaml | 6 ------ docs/changelog/112058.yaml | 5 ----- docs/changelog/112063.yaml | 32 -------------------------------- docs/changelog/112066.yaml | 6 ------ docs/changelog/112081.yaml | 5 ----- docs/changelog/112100.yaml | 5 ----- docs/changelog/112123.yaml | 5 ----- docs/changelog/112126.yaml | 5 ----- docs/changelog/112133.yaml | 5 ----- docs/changelog/112151.yaml | 5 ----- docs/changelog/112199.yaml | 5 ----- docs/changelog/112200.yaml | 6 ------ docs/changelog/112210.yaml | 5 ----- docs/changelog/112214.yaml | 5 ----- docs/changelog/112218.yaml | 9 --------- docs/changelog/112262.yaml | 6 ------ docs/changelog/112263.yaml | 6 ------ docs/changelog/112270.yaml | 5 ----- docs/changelog/112273.yaml | 5 ----- docs/changelog/112277.yaml | 5 ----- docs/changelog/112282.yaml | 6 ------ docs/changelog/112294.yaml | 8 -------- docs/changelog/112295.yaml | 5 ----- docs/changelog/112303.yaml | 5 ----- docs/changelog/112320.yaml | 5 ----- docs/changelog/112330.yaml | 5 ----- docs/changelog/112337.yaml | 5 ----- docs/changelog/112341.yaml | 5 ----- docs/changelog/112345.yaml | 8 -------- docs/changelog/112348.yaml | 6 ------ docs/changelog/112350.yaml | 5 ----- docs/changelog/112369.yaml | 5 ----- docs/changelog/112397.yaml | 5 ----- docs/changelog/112401.yaml | 6 ------ docs/changelog/112405.yaml | 6 ------ docs/changelog/112409.yaml | 6 ------ docs/changelog/112412.yaml | 5 ----- 
docs/changelog/112431.yaml | 6 ------ docs/changelog/112440.yaml | 5 ----- docs/changelog/112451.yaml | 29 ----------------------------- docs/changelog/112481.yaml | 5 ----- docs/changelog/112489.yaml | 6 ------ docs/changelog/112508.yaml | 5 ----- docs/changelog/112512.yaml | 5 ----- docs/changelog/112519.yaml | 5 ----- docs/changelog/112547.yaml | 5 ----- docs/changelog/112565.yaml | 5 ----- docs/changelog/112571.yaml | 17 ----------------- docs/changelog/112574.yaml | 5 ----- docs/changelog/112595.yaml | 6 ------ docs/changelog/112612.yaml | 5 ----- docs/changelog/112645.yaml | 6 ------ docs/changelog/112652.yaml | 5 ----- docs/changelog/112665.yaml | 14 -------------- docs/changelog/112677.yaml | 5 ----- docs/changelog/112678.yaml | 6 ------ docs/changelog/112687.yaml | 5 ----- docs/changelog/112706.yaml | 5 ----- docs/changelog/112707.yaml | 5 ----- docs/changelog/112723.yaml | 6 ------ docs/changelog/112768.yaml | 5 ----- docs/changelog/112826.yaml | 6 ------ docs/changelog/112850.yaml | 5 ----- docs/changelog/112874.yaml | 5 ----- docs/changelog/112888.yaml | 5 ----- docs/changelog/112895.yaml | 5 ----- docs/changelog/112905.yaml | 5 ----- docs/changelog/112916.yaml | 5 ----- docs/changelog/112929.yaml | 5 ----- docs/changelog/112933.yaml | 5 ----- docs/changelog/112938.yaml | 35 ----------------------------------- docs/changelog/112972.yaml | 6 ------ docs/changelog/112973.yaml | 5 ----- docs/changelog/113013.yaml | 5 ----- docs/changelog/113027.yaml | 6 ------ docs/changelog/113051.yaml | 5 ----- docs/changelog/113103.yaml | 6 ------ docs/changelog/113143.yaml | 10 ---------- docs/changelog/113158.yaml | 5 ----- docs/changelog/113172.yaml | 6 ------ docs/changelog/113183.yaml | 6 ------ docs/changelog/113187.yaml | 5 ----- docs/changelog/113251.yaml | 5 ----- docs/changelog/113276.yaml | 5 ----- docs/changelog/113280.yaml | 5 ----- docs/changelog/113286.yaml | 10 ---------- docs/changelog/113297.yaml | 5 ----- docs/changelog/113314.yaml | 6 ------ docs/changelog/113333.yaml | 5 ----- docs/changelog/113373.yaml | 6 ------ docs/changelog/113374.yaml | 5 ----- docs/changelog/113385.yaml | 5 ----- docs/changelog/113387.yaml | 5 ----- docs/changelog/113498.yaml | 5 ----- docs/changelog/113499.yaml | 6 ------ docs/changelog/113552.yaml | 5 ----- docs/changelog/113570.yaml | 7 ------- docs/changelog/113588.yaml | 5 ----- docs/changelog/113607.yaml | 5 ----- docs/changelog/113613.yaml | 7 ------- docs/changelog/113623.yaml | 6 ------ docs/changelog/113690.yaml | 5 ----- docs/changelog/113735.yaml | 28 ---------------------------- docs/changelog/113812.yaml | 5 ----- docs/changelog/113816.yaml | 5 ----- docs/changelog/113825.yaml | 12 ------------ docs/changelog/113873.yaml | 5 ----- docs/changelog/113897.yaml | 6 ------ docs/changelog/113910.yaml | 5 ----- docs/changelog/113911.yaml | 5 ----- docs/changelog/113967.yaml | 13 ------------- docs/changelog/113975.yaml | 19 ------------------- docs/changelog/113981.yaml | 6 ------ docs/changelog/113988.yaml | 5 ----- docs/changelog/113989.yaml | 5 ----- docs/changelog/114021.yaml | 5 ----- docs/changelog/114080.yaml | 5 ----- docs/changelog/114109.yaml | 5 ----- docs/changelog/114128.yaml | 5 ----- docs/changelog/114157.yaml | 6 ------ docs/changelog/114168.yaml | 5 ----- docs/changelog/114234.yaml | 5 ----- docs/changelog/114271.yaml | 5 ----- docs/changelog/114295.yaml | 5 ----- docs/changelog/114309.yaml | 6 ------ docs/changelog/114321.yaml | 5 ----- docs/changelog/114358.yaml | 5 ----- docs/changelog/114363.yaml | 5 ----- 
docs/changelog/114368.yaml | 5 ----- docs/changelog/114375.yaml | 5 ----- docs/changelog/114382.yaml | 5 ----- docs/changelog/114386.yaml | 5 ----- docs/changelog/114389.yaml | 5 ----- docs/changelog/114411.yaml | 5 ----- docs/changelog/114429.yaml | 5 ----- docs/changelog/114439.yaml | 5 ----- docs/changelog/114453.yaml | 5 ----- docs/changelog/114457.yaml | 6 ------ docs/changelog/114464.yaml | 5 ----- docs/changelog/114512.yaml | 5 ----- docs/changelog/114527.yaml | 5 ----- docs/changelog/114549.yaml | 5 ----- docs/changelog/114552.yaml | 5 ----- docs/changelog/114596.yaml | 5 ----- docs/changelog/114638.yaml | 7 ------- docs/changelog/114683.yaml | 5 ----- docs/changelog/114715.yaml | 5 ----- docs/changelog/114719.yaml | 5 ----- docs/changelog/114732.yaml | 5 ----- docs/changelog/114750.yaml | 5 ----- docs/changelog/114774.yaml | 5 ----- docs/changelog/114784.yaml | 5 ----- docs/changelog/114836.yaml | 6 ------ docs/changelog/114848.yaml | 5 ----- docs/changelog/114854.yaml | 10 ---------- docs/changelog/114856.yaml | 5 ----- docs/changelog/114888.yaml | 6 ------ docs/changelog/114951.yaml | 5 ----- docs/changelog/114990.yaml | 6 ------ docs/changelog/115031.yaml | 5 ----- docs/changelog/115048.yaml | 5 ----- docs/changelog/115061.yaml | 5 ----- docs/changelog/115117.yaml | 6 ------ docs/changelog/115147.yaml | 5 ----- docs/changelog/115194.yaml | 7 ------- docs/changelog/115245.yaml | 8 -------- docs/changelog/115312.yaml | 6 ------ docs/changelog/115317.yaml | 5 ----- docs/changelog/115399.yaml | 29 ----------------------------- docs/changelog/115404.yaml | 5 ----- docs/changelog/115429.yaml | 5 ----- docs/changelog/115594.yaml | 6 ------ docs/changelog/115624.yaml | 7 ------- docs/changelog/115656.yaml | 5 ----- docs/changelog/115715.yaml | 5 ----- docs/changelog/115811.yaml | 5 ----- docs/changelog/115823.yaml | 5 ----- docs/changelog/115868.yaml | 5 ----- docs/changelog/115952.yaml | 5 ----- docs/changelog/116015.yaml | 6 ------ docs/changelog/116086.yaml | 6 ------ docs/changelog/116212.yaml | 6 ------ docs/changelog/116266.yaml | 5 ----- docs/changelog/116274.yaml | 5 ----- 302 files changed, 1880 deletions(-) delete mode 100644 docs/changelog/106520.yaml delete mode 100644 docs/changelog/107047.yaml delete mode 100644 docs/changelog/107936.yaml delete mode 100644 docs/changelog/109017.yaml delete mode 100644 docs/changelog/109193.yaml delete mode 100644 docs/changelog/109414.yaml delete mode 100644 docs/changelog/109583.yaml delete mode 100644 docs/changelog/109667.yaml delete mode 100644 docs/changelog/109684.yaml delete mode 100644 docs/changelog/110021.yaml delete mode 100644 docs/changelog/110116.yaml delete mode 100644 docs/changelog/110216.yaml delete mode 100644 docs/changelog/110237.yaml delete mode 100644 docs/changelog/110399.yaml delete mode 100644 docs/changelog/110427.yaml delete mode 100644 docs/changelog/110520.yaml delete mode 100644 docs/changelog/110524.yaml delete mode 100644 docs/changelog/110527.yaml delete mode 100644 docs/changelog/110554.yaml delete mode 100644 docs/changelog/110574.yaml delete mode 100644 docs/changelog/110578.yaml delete mode 100644 docs/changelog/110593.yaml delete mode 100644 docs/changelog/110603.yaml delete mode 100644 docs/changelog/110606.yaml delete mode 100644 docs/changelog/110630.yaml delete mode 100644 docs/changelog/110633.yaml delete mode 100644 docs/changelog/110669.yaml delete mode 100644 docs/changelog/110676.yaml delete mode 100644 docs/changelog/110677.yaml delete mode 100644 docs/changelog/110718.yaml delete mode 
100644 docs/changelog/110734.yaml delete mode 100644 docs/changelog/110796.yaml delete mode 100644 docs/changelog/110816.yaml delete mode 100644 docs/changelog/110829.yaml delete mode 100644 docs/changelog/110833.yaml delete mode 100644 docs/changelog/110846.yaml delete mode 100644 docs/changelog/110847.yaml delete mode 100644 docs/changelog/110860.yaml delete mode 100644 docs/changelog/110879.yaml delete mode 100644 docs/changelog/110901.yaml delete mode 100644 docs/changelog/110921.yaml delete mode 100644 docs/changelog/110928.yaml delete mode 100644 docs/changelog/110951.yaml delete mode 100644 docs/changelog/110971.yaml delete mode 100644 docs/changelog/110974.yaml delete mode 100644 docs/changelog/110986.yaml delete mode 100644 docs/changelog/110993.yaml delete mode 100644 docs/changelog/111015.yaml delete mode 100644 docs/changelog/111064.yaml delete mode 100644 docs/changelog/111071.yaml delete mode 100644 docs/changelog/111079.yaml delete mode 100644 docs/changelog/111091.yaml delete mode 100644 docs/changelog/111105.yaml delete mode 100644 docs/changelog/111118.yaml delete mode 100644 docs/changelog/111123.yaml delete mode 100644 docs/changelog/111154.yaml delete mode 100644 docs/changelog/111161.yaml delete mode 100644 docs/changelog/111181.yaml delete mode 100644 docs/changelog/111193.yaml delete mode 100644 docs/changelog/111212.yaml delete mode 100644 docs/changelog/111215.yaml delete mode 100644 docs/changelog/111225.yaml delete mode 100644 docs/changelog/111226.yaml delete mode 100644 docs/changelog/111238.yaml delete mode 100644 docs/changelog/111245.yaml delete mode 100644 docs/changelog/111274.yaml delete mode 100644 docs/changelog/111284.yaml delete mode 100644 docs/changelog/111311.yaml delete mode 100644 docs/changelog/111315.yaml delete mode 100644 docs/changelog/111316.yaml delete mode 100644 docs/changelog/111336.yaml delete mode 100644 docs/changelog/111344.yaml delete mode 100644 docs/changelog/111367.yaml delete mode 100644 docs/changelog/111412.yaml delete mode 100644 docs/changelog/111413.yaml delete mode 100644 docs/changelog/111420.yaml delete mode 100644 docs/changelog/111437.yaml delete mode 100644 docs/changelog/111445.yaml delete mode 100644 docs/changelog/111457.yaml delete mode 100644 docs/changelog/111465.yaml delete mode 100644 docs/changelog/111490.yaml delete mode 100644 docs/changelog/111501.yaml delete mode 100644 docs/changelog/111516.yaml delete mode 100644 docs/changelog/111523.yaml delete mode 100644 docs/changelog/111544.yaml delete mode 100644 docs/changelog/111552.yaml delete mode 100644 docs/changelog/111576.yaml delete mode 100644 docs/changelog/111600.yaml delete mode 100644 docs/changelog/111624.yaml delete mode 100644 docs/changelog/111644.yaml delete mode 100644 docs/changelog/111655.yaml delete mode 100644 docs/changelog/111683.yaml delete mode 100644 docs/changelog/111689.yaml delete mode 100644 docs/changelog/111690.yaml delete mode 100644 docs/changelog/111740.yaml delete mode 100644 docs/changelog/111749.yaml delete mode 100644 docs/changelog/111770.yaml delete mode 100644 docs/changelog/111779.yaml delete mode 100644 docs/changelog/111797.yaml delete mode 100644 docs/changelog/111809.yaml delete mode 100644 docs/changelog/111818.yaml delete mode 100644 docs/changelog/111840.yaml delete mode 100644 docs/changelog/111855.yaml delete mode 100644 docs/changelog/111874.yaml delete mode 100644 docs/changelog/111879.yaml delete mode 100644 docs/changelog/111915.yaml delete mode 100644 docs/changelog/111917.yaml delete mode 100644 
docs/changelog/111937.yaml delete mode 100644 docs/changelog/111948.yaml delete mode 100644 docs/changelog/111950.yaml delete mode 100644 docs/changelog/111955.yaml delete mode 100644 docs/changelog/111968.yaml delete mode 100644 docs/changelog/111969.yaml delete mode 100644 docs/changelog/111972.yaml delete mode 100644 docs/changelog/111981.yaml delete mode 100644 docs/changelog/112019.yaml delete mode 100644 docs/changelog/112024.yaml delete mode 100644 docs/changelog/112026.yaml delete mode 100644 docs/changelog/112055.yaml delete mode 100644 docs/changelog/112058.yaml delete mode 100644 docs/changelog/112063.yaml delete mode 100644 docs/changelog/112066.yaml delete mode 100644 docs/changelog/112081.yaml delete mode 100644 docs/changelog/112100.yaml delete mode 100644 docs/changelog/112123.yaml delete mode 100644 docs/changelog/112126.yaml delete mode 100644 docs/changelog/112133.yaml delete mode 100644 docs/changelog/112151.yaml delete mode 100644 docs/changelog/112199.yaml delete mode 100644 docs/changelog/112200.yaml delete mode 100644 docs/changelog/112210.yaml delete mode 100644 docs/changelog/112214.yaml delete mode 100644 docs/changelog/112218.yaml delete mode 100644 docs/changelog/112262.yaml delete mode 100644 docs/changelog/112263.yaml delete mode 100644 docs/changelog/112270.yaml delete mode 100644 docs/changelog/112273.yaml delete mode 100644 docs/changelog/112277.yaml delete mode 100644 docs/changelog/112282.yaml delete mode 100644 docs/changelog/112294.yaml delete mode 100644 docs/changelog/112295.yaml delete mode 100644 docs/changelog/112303.yaml delete mode 100644 docs/changelog/112320.yaml delete mode 100644 docs/changelog/112330.yaml delete mode 100644 docs/changelog/112337.yaml delete mode 100644 docs/changelog/112341.yaml delete mode 100644 docs/changelog/112345.yaml delete mode 100644 docs/changelog/112348.yaml delete mode 100644 docs/changelog/112350.yaml delete mode 100644 docs/changelog/112369.yaml delete mode 100644 docs/changelog/112397.yaml delete mode 100644 docs/changelog/112401.yaml delete mode 100644 docs/changelog/112405.yaml delete mode 100644 docs/changelog/112409.yaml delete mode 100644 docs/changelog/112412.yaml delete mode 100644 docs/changelog/112431.yaml delete mode 100644 docs/changelog/112440.yaml delete mode 100644 docs/changelog/112451.yaml delete mode 100644 docs/changelog/112481.yaml delete mode 100644 docs/changelog/112489.yaml delete mode 100644 docs/changelog/112508.yaml delete mode 100644 docs/changelog/112512.yaml delete mode 100644 docs/changelog/112519.yaml delete mode 100644 docs/changelog/112547.yaml delete mode 100644 docs/changelog/112565.yaml delete mode 100644 docs/changelog/112571.yaml delete mode 100644 docs/changelog/112574.yaml delete mode 100644 docs/changelog/112595.yaml delete mode 100644 docs/changelog/112612.yaml delete mode 100644 docs/changelog/112645.yaml delete mode 100644 docs/changelog/112652.yaml delete mode 100644 docs/changelog/112665.yaml delete mode 100644 docs/changelog/112677.yaml delete mode 100644 docs/changelog/112678.yaml delete mode 100644 docs/changelog/112687.yaml delete mode 100644 docs/changelog/112706.yaml delete mode 100644 docs/changelog/112707.yaml delete mode 100644 docs/changelog/112723.yaml delete mode 100644 docs/changelog/112768.yaml delete mode 100644 docs/changelog/112826.yaml delete mode 100644 docs/changelog/112850.yaml delete mode 100644 docs/changelog/112874.yaml delete mode 100644 docs/changelog/112888.yaml delete mode 100644 docs/changelog/112895.yaml delete mode 100644 
docs/changelog/112905.yaml delete mode 100644 docs/changelog/112916.yaml delete mode 100644 docs/changelog/112929.yaml delete mode 100644 docs/changelog/112933.yaml delete mode 100644 docs/changelog/112938.yaml delete mode 100644 docs/changelog/112972.yaml delete mode 100644 docs/changelog/112973.yaml delete mode 100644 docs/changelog/113013.yaml delete mode 100644 docs/changelog/113027.yaml delete mode 100644 docs/changelog/113051.yaml delete mode 100644 docs/changelog/113103.yaml delete mode 100644 docs/changelog/113143.yaml delete mode 100644 docs/changelog/113158.yaml delete mode 100644 docs/changelog/113172.yaml delete mode 100644 docs/changelog/113183.yaml delete mode 100644 docs/changelog/113187.yaml delete mode 100644 docs/changelog/113251.yaml delete mode 100644 docs/changelog/113276.yaml delete mode 100644 docs/changelog/113280.yaml delete mode 100644 docs/changelog/113286.yaml delete mode 100644 docs/changelog/113297.yaml delete mode 100644 docs/changelog/113314.yaml delete mode 100644 docs/changelog/113333.yaml delete mode 100644 docs/changelog/113373.yaml delete mode 100644 docs/changelog/113374.yaml delete mode 100644 docs/changelog/113385.yaml delete mode 100644 docs/changelog/113387.yaml delete mode 100644 docs/changelog/113498.yaml delete mode 100644 docs/changelog/113499.yaml delete mode 100644 docs/changelog/113552.yaml delete mode 100644 docs/changelog/113570.yaml delete mode 100644 docs/changelog/113588.yaml delete mode 100644 docs/changelog/113607.yaml delete mode 100644 docs/changelog/113613.yaml delete mode 100644 docs/changelog/113623.yaml delete mode 100644 docs/changelog/113690.yaml delete mode 100644 docs/changelog/113735.yaml delete mode 100644 docs/changelog/113812.yaml delete mode 100644 docs/changelog/113816.yaml delete mode 100644 docs/changelog/113825.yaml delete mode 100644 docs/changelog/113873.yaml delete mode 100644 docs/changelog/113897.yaml delete mode 100644 docs/changelog/113910.yaml delete mode 100644 docs/changelog/113911.yaml delete mode 100644 docs/changelog/113967.yaml delete mode 100644 docs/changelog/113975.yaml delete mode 100644 docs/changelog/113981.yaml delete mode 100644 docs/changelog/113988.yaml delete mode 100644 docs/changelog/113989.yaml delete mode 100644 docs/changelog/114021.yaml delete mode 100644 docs/changelog/114080.yaml delete mode 100644 docs/changelog/114109.yaml delete mode 100644 docs/changelog/114128.yaml delete mode 100644 docs/changelog/114157.yaml delete mode 100644 docs/changelog/114168.yaml delete mode 100644 docs/changelog/114234.yaml delete mode 100644 docs/changelog/114271.yaml delete mode 100644 docs/changelog/114295.yaml delete mode 100644 docs/changelog/114309.yaml delete mode 100644 docs/changelog/114321.yaml delete mode 100644 docs/changelog/114358.yaml delete mode 100644 docs/changelog/114363.yaml delete mode 100644 docs/changelog/114368.yaml delete mode 100644 docs/changelog/114375.yaml delete mode 100644 docs/changelog/114382.yaml delete mode 100644 docs/changelog/114386.yaml delete mode 100644 docs/changelog/114389.yaml delete mode 100644 docs/changelog/114411.yaml delete mode 100644 docs/changelog/114429.yaml delete mode 100644 docs/changelog/114439.yaml delete mode 100644 docs/changelog/114453.yaml delete mode 100644 docs/changelog/114457.yaml delete mode 100644 docs/changelog/114464.yaml delete mode 100644 docs/changelog/114512.yaml delete mode 100644 docs/changelog/114527.yaml delete mode 100644 docs/changelog/114549.yaml delete mode 100644 docs/changelog/114552.yaml delete mode 100644 
docs/changelog/114596.yaml delete mode 100644 docs/changelog/114638.yaml delete mode 100644 docs/changelog/114683.yaml delete mode 100644 docs/changelog/114715.yaml delete mode 100644 docs/changelog/114719.yaml delete mode 100644 docs/changelog/114732.yaml delete mode 100644 docs/changelog/114750.yaml delete mode 100644 docs/changelog/114774.yaml delete mode 100644 docs/changelog/114784.yaml delete mode 100644 docs/changelog/114836.yaml delete mode 100644 docs/changelog/114848.yaml delete mode 100644 docs/changelog/114854.yaml delete mode 100644 docs/changelog/114856.yaml delete mode 100644 docs/changelog/114888.yaml delete mode 100644 docs/changelog/114951.yaml delete mode 100644 docs/changelog/114990.yaml delete mode 100644 docs/changelog/115031.yaml delete mode 100644 docs/changelog/115048.yaml delete mode 100644 docs/changelog/115061.yaml delete mode 100644 docs/changelog/115117.yaml delete mode 100644 docs/changelog/115147.yaml delete mode 100644 docs/changelog/115194.yaml delete mode 100644 docs/changelog/115245.yaml delete mode 100644 docs/changelog/115312.yaml delete mode 100644 docs/changelog/115317.yaml delete mode 100644 docs/changelog/115399.yaml delete mode 100644 docs/changelog/115404.yaml delete mode 100644 docs/changelog/115429.yaml delete mode 100644 docs/changelog/115594.yaml delete mode 100644 docs/changelog/115624.yaml delete mode 100644 docs/changelog/115656.yaml delete mode 100644 docs/changelog/115715.yaml delete mode 100644 docs/changelog/115811.yaml delete mode 100644 docs/changelog/115823.yaml delete mode 100644 docs/changelog/115868.yaml delete mode 100644 docs/changelog/115952.yaml delete mode 100644 docs/changelog/116015.yaml delete mode 100644 docs/changelog/116086.yaml delete mode 100644 docs/changelog/116212.yaml delete mode 100644 docs/changelog/116266.yaml delete mode 100644 docs/changelog/116274.yaml diff --git a/docs/changelog/106520.yaml b/docs/changelog/106520.yaml deleted file mode 100644 index c3fe69a4c3dbd..0000000000000 --- a/docs/changelog/106520.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106520 -summary: Updated the transport CA name in Security Auto-Configuration. 
-area: Security -type: bug -issues: - - 106455 diff --git a/docs/changelog/107047.yaml b/docs/changelog/107047.yaml deleted file mode 100644 index 89caed6f55074..0000000000000 --- a/docs/changelog/107047.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107047 -summary: "Search/Mapping: KnnVectorQueryBuilder support for allowUnmappedFields" -area: Search -type: bug -issues: - - 106846 diff --git a/docs/changelog/107936.yaml b/docs/changelog/107936.yaml deleted file mode 100644 index 89dd57f7a81a5..0000000000000 --- a/docs/changelog/107936.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 107936 -summary: Two empty mappings now are created equally -area: Mapping -type: bug -issues: - - 107031 diff --git a/docs/changelog/109017.yaml b/docs/changelog/109017.yaml deleted file mode 100644 index 80bcdd6fc0e25..0000000000000 --- a/docs/changelog/109017.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109017 -summary: "ESQL: Add `MV_PSERIES_WEIGHTED_SUM` for score calculations used by security\ - \ solution" -area: ES|QL -type: "feature" -issues: [ ] diff --git a/docs/changelog/109193.yaml b/docs/changelog/109193.yaml deleted file mode 100644 index 5cc664eaee2cd..0000000000000 --- a/docs/changelog/109193.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109193 -summary: "[ES|QL] explicit cast a string literal to `date_period` and `time_duration`\ - \ in arithmetic operations" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/109414.yaml b/docs/changelog/109414.yaml deleted file mode 100644 index 81b7541bde35b..0000000000000 --- a/docs/changelog/109414.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 109414 -summary: Don't fail retention lease sync actions due to capacity constraints -area: CRUD -type: bug -issues: - - 105926 diff --git a/docs/changelog/109583.yaml b/docs/changelog/109583.yaml deleted file mode 100644 index 84757e307b4fb..0000000000000 --- a/docs/changelog/109583.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 109583 -summary: "ESQL: INLINESTATS" -area: ES|QL -type: feature -issues: - - 107589 -highlight: - title: "ESQL: INLINESTATS" - body: |- - This adds the `INLINESTATS` command to ESQL which performs a STATS and - then enriches the results into the output stream. 
So, this query: - - [source,esql] - ---- - FROM test - | INLINESTATS m=MAX(a * b) BY b - | WHERE m == a * b - | SORT a DESC, b DESC - | LIMIT 3 - ---- - - Produces output like: - - | a | b | m | - | --- | --- | ----- | - | 99 | 999 | 98901 | - | 99 | 998 | 98802 | - | 99 | 997 | 98703 | - notable: true diff --git a/docs/changelog/109667.yaml b/docs/changelog/109667.yaml deleted file mode 100644 index 782a1b1cf6c9b..0000000000000 --- a/docs/changelog/109667.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109667 -summary: Inference autoscaling -area: Machine Learning -type: feature -issues: [] diff --git a/docs/changelog/109684.yaml b/docs/changelog/109684.yaml deleted file mode 100644 index 156f568290cf5..0000000000000 --- a/docs/changelog/109684.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 109684 -summary: Avoid `ModelAssignment` deadlock -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/110021.yaml b/docs/changelog/110021.yaml deleted file mode 100644 index 51878b960dfd0..0000000000000 --- a/docs/changelog/110021.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110021 -summary: "[ES|QL] validate `mv_sort` order" -area: ES|QL -type: bug -issues: - - 109910 diff --git a/docs/changelog/110116.yaml b/docs/changelog/110116.yaml deleted file mode 100644 index 9c309b8b80311..0000000000000 --- a/docs/changelog/110116.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110116 -summary: "[ESQL] Make query wrapped by `SingleValueQuery` cacheable" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/110216.yaml b/docs/changelog/110216.yaml deleted file mode 100644 index 00ab20b230e2c..0000000000000 --- a/docs/changelog/110216.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110216 -summary: Register SLM run before snapshotting to save stats -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/110237.yaml b/docs/changelog/110237.yaml deleted file mode 100644 index 076855385376c..0000000000000 --- a/docs/changelog/110237.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 110237 -summary: Optimize the loop processing of URL decoding -area: Infra/REST API -type: enhancement -issues: - - 110235 - diff --git a/docs/changelog/110399.yaml b/docs/changelog/110399.yaml deleted file mode 100644 index 9e04e2656809e..0000000000000 --- a/docs/changelog/110399.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110399 -summary: "[Inference API] Prevent inference endpoints from being deleted if they are\ - \ referenced by semantic text" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/110427.yaml b/docs/changelog/110427.yaml deleted file mode 100644 index ba8a1246e90e4..0000000000000 --- a/docs/changelog/110427.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110427 -summary: "[Inference API] Remove unused Cohere rerank service settings fields in a\ - \ BWC way" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/110520.yaml b/docs/changelog/110520.yaml deleted file mode 100644 index fba4b84e2279e..0000000000000 --- a/docs/changelog/110520.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110520 -summary: Add protection for OOM during aggregations partial reduction -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/110524.yaml b/docs/changelog/110524.yaml deleted file mode 100644 index 6274c99b09998..0000000000000 --- a/docs/changelog/110524.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110524 -summary: Introduce mode `subobjects=auto` for objects -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/110527.yaml 
b/docs/changelog/110527.yaml deleted file mode 100644 index 3ab19ecaaaa76..0000000000000 --- a/docs/changelog/110527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110527 -summary: "ESQL: Add boolean support to Max and Min aggs" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110554.yaml b/docs/changelog/110554.yaml deleted file mode 100644 index 8c0b896a4c979..0000000000000 --- a/docs/changelog/110554.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110554 -summary: Fix `MapperBuilderContext#isDataStream` when used in dynamic mappers -area: "Mapping" -type: bug -issues: [] diff --git a/docs/changelog/110574.yaml b/docs/changelog/110574.yaml deleted file mode 100644 index 1840838500151..0000000000000 --- a/docs/changelog/110574.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110574 -summary: "ES|QL: better validation for GROK patterns" -area: ES|QL -type: bug -issues: - - 110533 diff --git a/docs/changelog/110578.yaml b/docs/changelog/110578.yaml deleted file mode 100644 index 5d48171e4f328..0000000000000 --- a/docs/changelog/110578.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110578 -summary: Add `size_in_bytes` to enrich cache stats -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/110593.yaml b/docs/changelog/110593.yaml deleted file mode 100644 index 21a5d426ceb46..0000000000000 --- a/docs/changelog/110593.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110593 -summary: "[ES|QL] add tests for stats by constant" -area: ES|QL -type: bug -issues: - - 105383 diff --git a/docs/changelog/110603.yaml b/docs/changelog/110603.yaml deleted file mode 100644 index 4ba19985853df..0000000000000 --- a/docs/changelog/110603.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110603 -summary: Stop iterating over all fields to extract @timestamp value -area: TSDB -type: enhancement -issues: - - 92297 diff --git a/docs/changelog/110606.yaml b/docs/changelog/110606.yaml deleted file mode 100644 index d4ab5234289c4..0000000000000 --- a/docs/changelog/110606.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110606 -summary: Adding mapping validation to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/110630.yaml b/docs/changelog/110630.yaml deleted file mode 100644 index 9bf78e1209753..0000000000000 --- a/docs/changelog/110630.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110630 -summary: Telemetry for inference adaptive allocations -area: Machine Learning -type: feature -issues: [] diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml deleted file mode 100644 index d4d1dc68cdbcc..0000000000000 --- a/docs/changelog/110633.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110633 -summary: Add manage roles privilege -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/110669.yaml b/docs/changelog/110669.yaml deleted file mode 100644 index 301e756ca373c..0000000000000 --- a/docs/changelog/110669.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110669 -summary: "[ES|QL] Use `RangeQuery` and String in `BinaryComparison` on datetime fields" -area: ES|QL -type: bug -issues: - - 107900 diff --git a/docs/changelog/110676.yaml b/docs/changelog/110676.yaml deleted file mode 100644 index efe7e0e55f18f..0000000000000 --- a/docs/changelog/110676.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110676 -summary: Allow querying `index_mode` -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/110677.yaml b/docs/changelog/110677.yaml deleted file mode 100644 index 72fe5129f3b9d..0000000000000 --- a/docs/changelog/110677.yaml +++ /dev/null @@ -1,5 +0,0 @@ 
-pr: 110677 -summary: Add validation for synthetic source mode in logs mode indices -area: Logs -type: enhancement -issues: [] diff --git a/docs/changelog/110718.yaml b/docs/changelog/110718.yaml deleted file mode 100644 index 526083a8add0c..0000000000000 --- a/docs/changelog/110718.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110718 -summary: "ESQL: Add boolean support to TOP aggregation" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110734.yaml b/docs/changelog/110734.yaml deleted file mode 100644 index d6dce144b89cd..0000000000000 --- a/docs/changelog/110734.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110734 -summary: Fix bug in ML serverless autoscaling which prevented trained model updates from triggering a scale up -area: Machine Learning -type: bug -issues: [ ] diff --git a/docs/changelog/110796.yaml b/docs/changelog/110796.yaml deleted file mode 100644 index a54a9a08bbd27..0000000000000 --- a/docs/changelog/110796.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110796 -summary: Remove needless forking to GENERIC in `TransportMultiSearchAction` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/110816.yaml b/docs/changelog/110816.yaml deleted file mode 100644 index bf707376ec9ea..0000000000000 --- a/docs/changelog/110816.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110816 -summary: GET _cluster/settings with include_defaults returns the expected fallback value if defined in elasticsearch.yml -area: Infra/Settings -type: bug -issues: - - 110815 diff --git a/docs/changelog/110829.yaml b/docs/changelog/110829.yaml deleted file mode 100644 index 365a14436ec89..0000000000000 --- a/docs/changelog/110829.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 110829 -summary: deprecate `edge_ngram` side parameter -area: Analysis -type: deprecation -issues: [] -deprecation: - title: deprecate `edge_ngram` side parameter - area: Analysis - details: edge_ngram will no longer accept the side parameter. - impact: Users will need to update any usage of edge_ngram token filter that utilizes `side`. If the `back` value was used, they can achieve the same behavior by using the `reverse` token filter. 
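Several of the ES|QL entries above are one-line summaries; as a minimal illustrative sketch of the boolean support added to the `TOP` aggregation (PR 110718, above), a query like the following becomes valid. The `server_logs` index and its boolean `success` field are invented for demonstration and do not come from the changelog:

[source,esql]
----
FROM server_logs
| STATS top_success = TOP(success, 3, "desc")
----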
diff --git a/docs/changelog/110833.yaml b/docs/changelog/110833.yaml deleted file mode 100644 index 008fc489ed731..0000000000000 --- a/docs/changelog/110833.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110833 -summary: Make empty string searches be consistent with case (in)sensitivity -area: Search -type: bug -issues: [] diff --git a/docs/changelog/110846.yaml b/docs/changelog/110846.yaml deleted file mode 100644 index 56cc65e83648c..0000000000000 --- a/docs/changelog/110846.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110846 -summary: Fix MLTQuery handling of custom term frequencies -area: Ranking -type: bug -issues: [] diff --git a/docs/changelog/110847.yaml b/docs/changelog/110847.yaml deleted file mode 100644 index 214adc97ac7cb..0000000000000 --- a/docs/changelog/110847.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110847 -summary: SLM Interval based scheduling -area: ILM+SLM -type: feature -issues: [] diff --git a/docs/changelog/110860.yaml b/docs/changelog/110860.yaml deleted file mode 100644 index 5649ca4c88362..0000000000000 --- a/docs/changelog/110860.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110860 -summary: Speedup `CanMatchPreFilterSearchPhase` constructor -area: Search -type: bug -issues: [] diff --git a/docs/changelog/110879.yaml b/docs/changelog/110879.yaml deleted file mode 100644 index d114c6c2aa472..0000000000000 --- a/docs/changelog/110879.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110879 -summary: Add EXP ES|QL function -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/110901.yaml b/docs/changelog/110901.yaml deleted file mode 100644 index 599cb7ce9ec98..0000000000000 --- a/docs/changelog/110901.yaml +++ /dev/null @@ -1,15 +0,0 @@ -pr: 110901 -summary: Set lenient to true by default when using updateable synonyms -area: Analysis -type: breaking -issues: [] -breaking: - title: Set lenient to true by default when using updateable synonyms - area: Analysis - details: | - When a `synonym` or `synonym_graph` token filter is configured with `updateable: true`, the default `lenient` - value will now be `true`. - impact: | - `synonym` or `synonym_graph` token filters configured with `updateable: true` will ignore invalid synonyms by - default. This prevents shard initialization errors on invalid synonyms. 
- notable: true diff --git a/docs/changelog/110921.yaml b/docs/changelog/110921.yaml deleted file mode 100644 index 28cd569404945..0000000000000 --- a/docs/changelog/110921.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110921 -summary: "ESQL: Support IP fields in MAX and MIN aggregations" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110928.yaml b/docs/changelog/110928.yaml deleted file mode 100644 index dcb2df6e6cca9..0000000000000 --- a/docs/changelog/110928.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110928 -summary: Dense vector field types updatable for int4 -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/110951.yaml b/docs/changelog/110951.yaml deleted file mode 100644 index ec8bc9cae6347..0000000000000 --- a/docs/changelog/110951.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110951 -summary: Allow task canceling of validate API calls -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/110971.yaml b/docs/changelog/110971.yaml deleted file mode 100644 index 3579f77dc0d1d..0000000000000 --- a/docs/changelog/110971.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110971 -summary: "Search in ES|QL: Add MATCH operator" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/110974.yaml b/docs/changelog/110974.yaml deleted file mode 100644 index c9e8c9b78675e..0000000000000 --- a/docs/changelog/110974.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110974 -summary: Add custom rule parameters to force time shift -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/110986.yaml b/docs/changelog/110986.yaml deleted file mode 100644 index 4e320b19c9578..0000000000000 --- a/docs/changelog/110986.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 110986 -summary: Fix unnecessary mustache template evaluation -area: Ingest Node -type: enhancement -issues: - - 110191 diff --git a/docs/changelog/110993.yaml b/docs/changelog/110993.yaml deleted file mode 100644 index 9eb653a09e3a4..0000000000000 --- a/docs/changelog/110993.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110993 -summary: Add link to Max Shards Per Node exception message -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/111015.yaml b/docs/changelog/111015.yaml deleted file mode 100644 index 3cc363c8bbf6b..0000000000000 --- a/docs/changelog/111015.yaml +++ /dev/null @@ -1,15 +0,0 @@ -pr: 111015 -summary: Always allow rebalancing by default -area: Allocation -type: enhancement -issues: [] -highlight: - title: Always allow rebalancing by default - body: |- - In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to - `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was - appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has - better support for rebalancing a cluster that is not in `green` health, and expects to be able to rebalance some - shards away from over-full nodes to avoid allocating shards to undesirable locations in the first place. From - version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy allocator is explicitly enabled. 
- notable: true diff --git a/docs/changelog/111064.yaml b/docs/changelog/111064.yaml deleted file mode 100644 index 848da842b090e..0000000000000 --- a/docs/changelog/111064.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111064 -summary: "ESQL: Fix Double operations returning infinite" -area: ES|QL -type: bug -issues: - - 111026 diff --git a/docs/changelog/111071.yaml b/docs/changelog/111071.yaml deleted file mode 100644 index 5e8ab53db3d03..0000000000000 --- a/docs/changelog/111071.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111071 -summary: Use native scalar scorer for int8_flat index -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/111079.yaml b/docs/changelog/111079.yaml deleted file mode 100644 index aac22005f912d..0000000000000 --- a/docs/changelog/111079.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111079 -summary: PUT slm policy should only increase version if actually changed -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/111091.yaml b/docs/changelog/111091.yaml deleted file mode 100644 index 8444681a14a48..0000000000000 --- a/docs/changelog/111091.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111091 -summary: "X-pack/plugin/otel: introduce x-pack-otel plugin" -area: Data streams -type: feature -issues: [] diff --git a/docs/changelog/111105.yaml b/docs/changelog/111105.yaml deleted file mode 100644 index ed32bd1ef7fc3..0000000000000 --- a/docs/changelog/111105.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111105 -summary: "ESQL: TOP aggregation IP support" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/111118.yaml b/docs/changelog/111118.yaml deleted file mode 100644 index c9fe6cb443688..0000000000000 --- a/docs/changelog/111118.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111118 -summary: "[ES|QL] Simplify patterns for subfields" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/111123.yaml b/docs/changelog/111123.yaml deleted file mode 100644 index 605b8607f4082..0000000000000 --- a/docs/changelog/111123.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111123 -summary: Add Lucene segment-level fields stats -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/111154.yaml b/docs/changelog/111154.yaml deleted file mode 100644 index 3297f5005a811..0000000000000 --- a/docs/changelog/111154.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111154 -summary: EIS integration -area: Inference -type: feature -issues: [] diff --git a/docs/changelog/111161.yaml b/docs/changelog/111161.yaml deleted file mode 100644 index c081d555ff1ee..0000000000000 --- a/docs/changelog/111161.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111161 -summary: Add support for templates when validating mappings in the simulate ingest - API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/111181.yaml b/docs/changelog/111181.yaml deleted file mode 100644 index 7f9f5937b7652..0000000000000 --- a/docs/changelog/111181.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111181 -summary: "[Inference API] Add Alibaba Cloud AI Search Model support to Inference API" -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/111193.yaml b/docs/changelog/111193.yaml deleted file mode 100644 index 9e56facb60d3a..0000000000000 --- a/docs/changelog/111193.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111193 -summary: Fix cases of collections with one point -area: Geo -type: bug -issues: - - 110982 diff --git a/docs/changelog/111212.yaml b/docs/changelog/111212.yaml deleted file mode 100644 index 67d1513b3ff6f..0000000000000 --- 
a/docs/changelog/111212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111212 -summary: Fix score count validation in reranker response -area: Ranking -type: bug -issues: - - 111202 diff --git a/docs/changelog/111215.yaml b/docs/changelog/111215.yaml deleted file mode 100644 index dc044c2283fc4..0000000000000 --- a/docs/changelog/111215.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111215 -summary: Make `SnapshotLifecycleStats` immutable so `SnapshotLifecycleMetadata.EMPTY` - isn't changed as side-effect -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/111225.yaml b/docs/changelog/111225.yaml deleted file mode 100644 index bcd344847cfd2..0000000000000 --- a/docs/changelog/111225.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111225 -summary: Upgrade Azure SDK -area: Snapshot/Restore -type: upgrade -issues: [] diff --git a/docs/changelog/111226.yaml b/docs/changelog/111226.yaml deleted file mode 100644 index 1021a26fa789f..0000000000000 --- a/docs/changelog/111226.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111226 -summary: "ES|QL: add Telemetry API and track top functions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111238.yaml b/docs/changelog/111238.yaml deleted file mode 100644 index b918b754ff595..0000000000000 --- a/docs/changelog/111238.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111238 -summary: Fix validation of TEXT fields with case insensitive comparison -area: EQL -type: bug -issues: - - 111235 diff --git a/docs/changelog/111245.yaml b/docs/changelog/111245.yaml deleted file mode 100644 index 384373d52cb20..0000000000000 --- a/docs/changelog/111245.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111245 -summary: Truncating watcher history if it is too large -area: Watcher -type: bug -issues: - - 94745 diff --git a/docs/changelog/111274.yaml b/docs/changelog/111274.yaml deleted file mode 100644 index e26bcc03ce118..0000000000000 --- a/docs/changelog/111274.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111274 -summary: Include account name in Azure settings exceptions -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/111284.yaml b/docs/changelog/111284.yaml deleted file mode 100644 index f87649a134af6..0000000000000 --- a/docs/changelog/111284.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111284 -summary: Update `semantic_text` field to support indexing numeric and boolean data - types -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/111311.yaml b/docs/changelog/111311.yaml deleted file mode 100644 index 5786e11e885e2..0000000000000 --- a/docs/changelog/111311.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111311 -summary: Adding support for data streams with a match-all template -area: Data streams -type: bug -issues: - - 111204 diff --git a/docs/changelog/111315.yaml b/docs/changelog/111315.yaml deleted file mode 100644 index 0e2e56898b51c..0000000000000 --- a/docs/changelog/111315.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111315 -summary: Add link to flood-stage watermark exception message -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/111316.yaml b/docs/changelog/111316.yaml deleted file mode 100644 index 0d915cd1ec3ea..0000000000000 --- a/docs/changelog/111316.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111316 -summary: "[Service Account] Add `AutoOps` account" -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/111336.yaml b/docs/changelog/111336.yaml deleted file mode 100644 index d5bf602cb7a88..0000000000000 --- a/docs/changelog/111336.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 
111336 -summary: Use the same chunking configurations for models in the Elasticsearch service -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/111344.yaml b/docs/changelog/111344.yaml deleted file mode 100644 index 3d5988054749d..0000000000000 --- a/docs/changelog/111344.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111344 -summary: Add support for Azure Managed Identity -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/111367.yaml b/docs/changelog/111367.yaml deleted file mode 100644 index 89e6c1d3b4da4..0000000000000 --- a/docs/changelog/111367.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111367 -summary: "ESQL: Add Values aggregation tests, fix `ConstantBytesRefBlock` memory handling" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/111412.yaml b/docs/changelog/111412.yaml deleted file mode 100644 index 297fa77cd2664..0000000000000 --- a/docs/changelog/111412.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111412 -summary: Make enrich cache based on memory usage -area: Ingest Node -type: enhancement -issues: - - 106081 diff --git a/docs/changelog/111413.yaml b/docs/changelog/111413.yaml deleted file mode 100644 index 0eae45b17d0c4..0000000000000 --- a/docs/changelog/111413.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111413 -summary: "ESQL: Fix synthetic attribute pruning" -area: ES|QL -type: bug -issues: - - 105821 diff --git a/docs/changelog/111420.yaml b/docs/changelog/111420.yaml deleted file mode 100644 index 4e2640ac5762a..0000000000000 --- a/docs/changelog/111420.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111420 -summary: "[Query rules] Add `exclude` query rule type" -area: Relevance -type: feature -issues: [] diff --git a/docs/changelog/111437.yaml b/docs/changelog/111437.yaml deleted file mode 100644 index a50312ffdd1aa..0000000000000 --- a/docs/changelog/111437.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111437 -summary: "[ES|QL] Create `Range` in `PushFiltersToSource` for qualified pushable filters on the same field" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111445.yaml b/docs/changelog/111445.yaml deleted file mode 100644 index 9ba8e4371bd0c..0000000000000 --- a/docs/changelog/111445.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111445 -summary: Support booleans in routing path -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/111457.yaml b/docs/changelog/111457.yaml deleted file mode 100644 index f4ad4ee53eb0a..0000000000000 --- a/docs/changelog/111457.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111457 -summary: Add support for boolean dimensions -area: TSDB -type: enhancement -issues: - - 111338 diff --git a/docs/changelog/111465.yaml b/docs/changelog/111465.yaml deleted file mode 100644 index 2a8df287427a9..0000000000000 --- a/docs/changelog/111465.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111465 -summary: Add range and regexp Intervals -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/111490.yaml b/docs/changelog/111490.yaml deleted file mode 100644 index b67c16189cc62..0000000000000 --- a/docs/changelog/111490.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111490 -summary: Temporarily return both `modelId` and `inferenceId` for GET /_inference until we migrate clients to only `inferenceId` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/111501.yaml b/docs/changelog/111501.yaml deleted file mode 100644 index a424142376e52..0000000000000 --- a/docs/changelog/111501.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111501 -summary: "[ES|QL] Combine 
Disjunctive CIDRMatch" -area: ES|QL -type: enhancement -issues: - - 105143 diff --git a/docs/changelog/111516.yaml b/docs/changelog/111516.yaml deleted file mode 100644 index 96e8bd843f750..0000000000000 --- a/docs/changelog/111516.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111516 -summary: Adding support for `allow_partial_search_results` in PIT -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/111523.yaml b/docs/changelog/111523.yaml deleted file mode 100644 index 202d16c5a426d..0000000000000 --- a/docs/changelog/111523.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111523 -summary: Search coordinator uses `event.ingested` in cluster state to do rewrites -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/111544.yaml b/docs/changelog/111544.yaml deleted file mode 100644 index d4c46f485e664..0000000000000 --- a/docs/changelog/111544.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111544 -summary: "ESQL: Strings support for MAX and MIN aggregations" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/111552.yaml b/docs/changelog/111552.yaml deleted file mode 100644 index d9991788d4fa9..0000000000000 --- a/docs/changelog/111552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111552 -summary: Siem ea 9521 improve test -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111576.yaml b/docs/changelog/111576.yaml deleted file mode 100644 index 6d3c331f4bbd5..0000000000000 --- a/docs/changelog/111576.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111576 -summary: Execute shard snapshot tasks in shard-id order -area: Snapshot/Restore -type: enhancement -issues: - - 108739 diff --git a/docs/changelog/111600.yaml b/docs/changelog/111600.yaml deleted file mode 100644 index 0c1e01e1c2e23..0000000000000 --- a/docs/changelog/111600.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111600 -summary: Make ecs@mappings work with OTel attributes -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/111624.yaml b/docs/changelog/111624.yaml deleted file mode 100644 index 7b04b244ef7a7..0000000000000 --- a/docs/changelog/111624.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111624 -summary: Extend logging for dropped warning headers -area: Infra/Core -type: enhancement -issues: - - 90527 diff --git a/docs/changelog/111644.yaml b/docs/changelog/111644.yaml deleted file mode 100644 index 3705d697c95e3..0000000000000 --- a/docs/changelog/111644.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111644 -summary: Force using the last centroid during merging -area: Aggregations -type: bug -issues: - - 111065 diff --git a/docs/changelog/111655.yaml b/docs/changelog/111655.yaml deleted file mode 100644 index 077714d15a712..0000000000000 --- a/docs/changelog/111655.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111655 -summary: Migrate Inference to `ChunkedToXContent` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/111683.yaml b/docs/changelog/111683.yaml deleted file mode 100644 index cbb2e5ad71ddc..0000000000000 --- a/docs/changelog/111683.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111683 -summary: Only emit product origin in deprecation log if present -area: Infra/Logging -type: bug -issues: - - 81757 diff --git a/docs/changelog/111689.yaml b/docs/changelog/111689.yaml deleted file mode 100644 index ccb3d4d4f87c5..0000000000000 --- a/docs/changelog/111689.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111689 -summary: Add nanos support to `ZonedDateTime` serialization -area: Infra/Core -type: enhancement -issues: - - 68292 diff --git 
a/docs/changelog/111690.yaml b/docs/changelog/111690.yaml deleted file mode 100644 index 36e715744ad88..0000000000000 --- a/docs/changelog/111690.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111690 -summary: "ESQL: Support INLINESTATS grouped on expressions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111740.yaml b/docs/changelog/111740.yaml deleted file mode 100644 index 48b7ee200e45e..0000000000000 --- a/docs/changelog/111740.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111740 -summary: Fix Start Trial API output acknowledgement header for features -area: License -type: bug -issues: - - 111739 diff --git a/docs/changelog/111749.yaml b/docs/changelog/111749.yaml deleted file mode 100644 index 77e0c65005dd6..0000000000000 --- a/docs/changelog/111749.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111749 -summary: "ESQL: Added `mv_percentile` function" -area: ES|QL -type: feature -issues: - - 111591 diff --git a/docs/changelog/111770.yaml b/docs/changelog/111770.yaml deleted file mode 100644 index 8d6bde6b25ef9..0000000000000 --- a/docs/changelog/111770.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111770 -summary: Integrate IBM watsonx to Inference API for text embeddings -area: Experiences -type: enhancement -issues: [] diff --git a/docs/changelog/111779.yaml b/docs/changelog/111779.yaml deleted file mode 100644 index 52c635490e1e4..0000000000000 --- a/docs/changelog/111779.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 111779 -summary: "ESQL: Fix serialization during `can_match`" -area: ES|QL -type: bug -issues: - - 111701 - - 111726 diff --git a/docs/changelog/111797.yaml b/docs/changelog/111797.yaml deleted file mode 100644 index 00b793a19d9c3..0000000000000 --- a/docs/changelog/111797.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111797 -summary: "ESQL: fix for missing indices error message" -area: ES|QL -type: bug -issues: - - 111712 diff --git a/docs/changelog/111809.yaml b/docs/changelog/111809.yaml deleted file mode 100644 index 5a2f220e3a697..0000000000000 --- a/docs/changelog/111809.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111809 -summary: Add Field caps support for Semantic Text -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/111818.yaml b/docs/changelog/111818.yaml deleted file mode 100644 index c3a632861aae6..0000000000000 --- a/docs/changelog/111818.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111818 -summary: Add tier preference to security index settings allowlist -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/111840.yaml b/docs/changelog/111840.yaml deleted file mode 100644 index c40a9e2aef621..0000000000000 --- a/docs/changelog/111840.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111840 -summary: "ESQL: Add async ID and `is_running` headers to ESQL async query" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/111855.yaml b/docs/changelog/111855.yaml deleted file mode 100644 index 3f15e9c20135a..0000000000000 --- a/docs/changelog/111855.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111855 -summary: "ESQL: Profile more timing information" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/111874.yaml b/docs/changelog/111874.yaml deleted file mode 100644 index 26ec90aa6cd4c..0000000000000 --- a/docs/changelog/111874.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 111874 -summary: "ESQL: BUCKET: allow numerical spans as whole numbers" -area: ES|QL -type: enhancement -issues: - - 104646 - - 109340 - - 105375 diff --git a/docs/changelog/111879.yaml b/docs/changelog/111879.yaml deleted file mode 100644 index 
b8c2111e1d286..0000000000000 --- a/docs/changelog/111879.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111879 -summary: "ESQL: Have BUCKET generate friendlier intervals" -area: ES|QL -type: enhancement -issues: - - 110916 diff --git a/docs/changelog/111915.yaml b/docs/changelog/111915.yaml deleted file mode 100644 index f64c45b82d10c..0000000000000 --- a/docs/changelog/111915.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111915 -summary: Fix DLS & FLS sometimes being enforced when it is disabled -area: Authorization -type: bug -issues: - - 94709 diff --git a/docs/changelog/111917.yaml b/docs/changelog/111917.yaml deleted file mode 100644 index 0dc760d76a698..0000000000000 --- a/docs/changelog/111917.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 111917 -summary: "[ES|QL] Cast mixed numeric types to a common numeric type for Coalesce and\ - \ In at Analyzer" -area: ES|QL -type: enhancement -issues: - - 111486 diff --git a/docs/changelog/111937.yaml b/docs/changelog/111937.yaml deleted file mode 100644 index 7d856e29d54c5..0000000000000 --- a/docs/changelog/111937.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111937 -summary: Handle `BigInteger` in xcontent copy -area: Infra/Core -type: bug -issues: - - 111812 diff --git a/docs/changelog/111948.yaml b/docs/changelog/111948.yaml deleted file mode 100644 index a3a592abaf1ca..0000000000000 --- a/docs/changelog/111948.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111948 -summary: Upgrade xcontent to Jackson 2.17.0 -area: Infra/Core -type: upgrade -issues: [] diff --git a/docs/changelog/111950.yaml b/docs/changelog/111950.yaml deleted file mode 100644 index 3f23c17d8e652..0000000000000 --- a/docs/changelog/111950.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111950 -summary: "[ES|QL] Name parameter with leading underscore" -area: ES|QL -type: enhancement -issues: - - 111821 diff --git a/docs/changelog/111955.yaml b/docs/changelog/111955.yaml deleted file mode 100644 index ebc518203b7cc..0000000000000 --- a/docs/changelog/111955.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 111955 -summary: Clean up dangling S3 multipart uploads -area: Snapshot/Restore -type: enhancement -issues: - - 101169 - - 44971 diff --git a/docs/changelog/111968.yaml b/docs/changelog/111968.yaml deleted file mode 100644 index 9d758c76369e9..0000000000000 --- a/docs/changelog/111968.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111968 -summary: "ESQL: don't lose the original casting error message" -area: ES|QL -type: bug -issues: - - 111967 diff --git a/docs/changelog/111969.yaml b/docs/changelog/111969.yaml deleted file mode 100644 index 2d276850c4988..0000000000000 --- a/docs/changelog/111969.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111969 -summary: "[Profiling] add `container.id` field to event index template" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml deleted file mode 100644 index a5bfcd5b0882e..0000000000000 --- a/docs/changelog/111972.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 111972 -summary: Introduce global retention in data stream lifecycle. -area: Data streams -type: feature -issues: [] -highlight: - title: Add global retention in data stream lifecycle - body: |- - Data stream lifecycle now supports configuring retention on a cluster level, - namely global retention. Global retention allows us to configure two different - retentions: - - - `data_streams.lifecycle.retention.default` is applied to all data streams managed - by the data stream lifecycle that do not have retention defined on the data stream level.
- - `data_streams.lifecycle.retention.max` is applied to all data streams managed by the - data stream lifecycle and it allows any data stream data to be deleted after the `max_retention` has passed. - notable: true diff --git a/docs/changelog/111981.yaml b/docs/changelog/111981.yaml deleted file mode 100644 index 13b8fe4b7e38d..0000000000000 --- a/docs/changelog/111981.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 111981 -summary: Allow fields with dots in sparse vector field mapper -area: Mapping -type: enhancement -issues: - - 109118 diff --git a/docs/changelog/112019.yaml b/docs/changelog/112019.yaml deleted file mode 100644 index 7afb207864ed7..0000000000000 --- a/docs/changelog/112019.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112019 -summary: Display effective retention in the relevant data stream APIs -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112024.yaml b/docs/changelog/112024.yaml deleted file mode 100644 index e426693fba964..0000000000000 --- a/docs/changelog/112024.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112024 -summary: (API) Cluster Health report `unassigned_primary_shards` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/112026.yaml b/docs/changelog/112026.yaml deleted file mode 100644 index fedf001923ab4..0000000000000 --- a/docs/changelog/112026.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112026 -summary: Create `StreamingHttpResultPublisher` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112055.yaml b/docs/changelog/112055.yaml deleted file mode 100644 index cdf15b3b37468..0000000000000 --- a/docs/changelog/112055.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112055 -summary: "ESQL: `mv_median_absolute_deviation` function" -area: ES|QL -type: feature -issues: - - 111590 diff --git a/docs/changelog/112058.yaml b/docs/changelog/112058.yaml deleted file mode 100644 index e974b3413582e..0000000000000 --- a/docs/changelog/112058.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112058 -summary: Fix RRF validation for `rank_constant` < 1 -area: Ranking -type: bug -issues: [] diff --git a/docs/changelog/112063.yaml b/docs/changelog/112063.yaml deleted file mode 100644 index 190993967a074..0000000000000 --- a/docs/changelog/112063.yaml +++ /dev/null @@ -1,32 +0,0 @@ -pr: 112063 -summary: Spatial search functions support multi-valued fields in compute engine -area: ES|QL -type: bug -issues: - - 112102 - - 112505 - - 110830 -highlight: - title: "ESQL: Multi-value fields supported in Geospatial predicates" - body: |- - Supporting multi-value fields in `WHERE` predicates is a challenge due to not knowing whether `ALL` or `ANY` - of the values in the field should pass the predicate. - For example, should the field `age:[10,30]` pass the predicate `WHERE age>20` or not? - This ambiguity does not exist with the spatial predicates - `ST_INTERSECTS` and `ST_DISJOINT`, because the choice between `ANY` or `ALL` - is implied by the predicate itself. - Consider a predicate checking a field named `location` against a test geometry named `shape`: - - * `ST_INTERSECTS(field, shape)` - true if `ANY` value can intersect the shape - * `ST_DISJOINT(field, shape)` - true only if `ALL` values are disjoint from the shape - - This works even if the shape argument is itself a complex or compound geometry.
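As a minimal sketch of the `ANY` semantics just described (the `airports` index, `location` field, and polygon are illustrative, not taken from the changelog), a multi-valued `location` passes this filter if any one of its values intersects the polygon:

[source,esql]
----
FROM airports
| WHERE ST_INTERSECTS(location, TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))"))
----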
- - Similar logic exists for `ST_CONTAINS` and `ST_WITHIN` predicates, but these are not as easily solved - with `ANY` or `ALL`, because a collection of geometries contains another collection if each of the contained - geometries is within at least one of the containing geometries. Evaluating this requires that the multi-value - field is first combined into a single geometry before performing the predicate check. - - * `ST_CONTAINS(field, shape)` - true if the combined geometry contains the shape - * `ST_WITHIN(field, shape)` - true if the combined geometry is within the shape - notable: false diff --git a/docs/changelog/112066.yaml b/docs/changelog/112066.yaml deleted file mode 100644 index 5dd846766bc8e..0000000000000 --- a/docs/changelog/112066.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112066 -summary: Do not treat replica as unassigned if primary recently created and unassigned - time is below a threshold -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/112081.yaml b/docs/changelog/112081.yaml deleted file mode 100644 index a4009e01fca71..0000000000000 --- a/docs/changelog/112081.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112081 -summary: "[ES|QL] Validate index name in parser" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112100.yaml b/docs/changelog/112100.yaml deleted file mode 100644 index 9135edecb4d77..0000000000000 --- a/docs/changelog/112100.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112100 -summary: Exclude internal data streams from global retention -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/112123.yaml b/docs/changelog/112123.yaml deleted file mode 100644 index 0c0d7ac44cd17..0000000000000 --- a/docs/changelog/112123.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112123 -summary: SLM interval schedule followup - add back `getFieldName` style getters -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/112126.yaml b/docs/changelog/112126.yaml deleted file mode 100644 index f6a7aeb893a5e..0000000000000 --- a/docs/changelog/112126.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112126 -summary: Add support for spatial relationships in point field mapper -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml deleted file mode 100644 index 11109402b7373..0000000000000 --- a/docs/changelog/112133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112133 -summary: Add telemetry for repository usage -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/112151.yaml b/docs/changelog/112151.yaml deleted file mode 100644 index f5cbfd8da07c2..0000000000000 --- a/docs/changelog/112151.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112151 -summary: Store original source for keywords using a normalizer -area: Logs -type: enhancement -issues: [] diff --git a/docs/changelog/112199.yaml b/docs/changelog/112199.yaml deleted file mode 100644 index eb22f215f9828..0000000000000 --- a/docs/changelog/112199.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112199 -summary: Support docvalues only query in shape field -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/112200.yaml b/docs/changelog/112200.yaml deleted file mode 100644 index 0c2c3d71e3ddf..0000000000000 --- a/docs/changelog/112200.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112200 -summary: "ES|QL: better validation of GROK patterns" -area: ES|QL -type: bug -issues: - - 112111 diff --git a/docs/changelog/112210.yaml b/docs/changelog/112210.yaml deleted file mode 100644 index 
6483b8b01315c..0000000000000 --- a/docs/changelog/112210.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112210 -summary: Expose global retention settings via data stream lifecycle API -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml deleted file mode 100644 index 430f95a72bb3f..0000000000000 --- a/docs/changelog/112214.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112214 -summary: '`ByteArrayStreamInput:` Return -1 when there are no more bytes to read' -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/112218.yaml b/docs/changelog/112218.yaml deleted file mode 100644 index c426dd7ade4ed..0000000000000 --- a/docs/changelog/112218.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 112218 -summary: "ESQL: Fix a bug in `MV_PERCENTILE`" -area: ES|QL -type: bug -issues: - - 112193 - - 112180 - - 112187 - - 112188 diff --git a/docs/changelog/112262.yaml b/docs/changelog/112262.yaml deleted file mode 100644 index fe23c14c79c9e..0000000000000 --- a/docs/changelog/112262.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112262 -summary: Check for disabling own user in Put User API -area: Authentication -type: bug -issues: - - 90205 diff --git a/docs/changelog/112263.yaml b/docs/changelog/112263.yaml deleted file mode 100644 index 2d1321f327673..0000000000000 --- a/docs/changelog/112263.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112263 -summary: Fix `TokenService` always appearing used in Feature Usage -area: License -type: bug -issues: - - 61956 diff --git a/docs/changelog/112270.yaml b/docs/changelog/112270.yaml deleted file mode 100644 index 1e6b9c7fc9290..0000000000000 --- a/docs/changelog/112270.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112270 -summary: Support sparse embedding models in the elasticsearch inference service -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112273.yaml b/docs/changelog/112273.yaml deleted file mode 100644 index 3182a1884a145..0000000000000 --- a/docs/changelog/112273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 111181 -summary: "[Inference API] Add Docs for AlibabaCloud AI Search Support for the Inference API" -area: Machine Learning -type: enhancement -issues: [ ] diff --git a/docs/changelog/112277.yaml b/docs/changelog/112277.yaml deleted file mode 100644 index eac474555999a..0000000000000 --- a/docs/changelog/112277.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112277 -summary: Upgrade `repository-azure` dependencies -area: Snapshot/Restore -type: upgrade -issues: [] diff --git a/docs/changelog/112282.yaml b/docs/changelog/112282.yaml deleted file mode 100644 index beea119b06aef..0000000000000 --- a/docs/changelog/112282.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112282 -summary: Adds example plugin for custom ingest processor -area: Ingest Node -type: enhancement -issues: - - 111539 diff --git a/docs/changelog/112294.yaml b/docs/changelog/112294.yaml deleted file mode 100644 index 71ce9eeef584c..0000000000000 --- a/docs/changelog/112294.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 112294 -summary: "Use fallback synthetic source for `copy_to` and doc_values: false cases" -area: Mapping -type: enhancement -issues: - - 110753 - - 110038 - - 109546 diff --git a/docs/changelog/112295.yaml b/docs/changelog/112295.yaml deleted file mode 100644 index ecbd365d03918..0000000000000 --- a/docs/changelog/112295.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112295 -summary: "ESQL: Speed up CASE for some parameters" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112303.yaml 
b/docs/changelog/112303.yaml deleted file mode 100644 index a363e621e4c48..0000000000000 --- a/docs/changelog/112303.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112303 -summary: Add 'verbose' flag retrieving `maximum_timestamp` for get data stream API -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/112320.yaml b/docs/changelog/112320.yaml deleted file mode 100644 index d35a08dfa4e91..0000000000000 --- a/docs/changelog/112320.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112320 -summary: Upgrade xcontent to Jackson 2.17.2 -area: Infra/Core -type: upgrade -issues: [] diff --git a/docs/changelog/112330.yaml b/docs/changelog/112330.yaml deleted file mode 100644 index 498698f5175ba..0000000000000 --- a/docs/changelog/112330.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112330 -summary: Add links to network disconnect troubleshooting -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/112337.yaml b/docs/changelog/112337.yaml deleted file mode 100644 index f7d667e23cfe9..0000000000000 --- a/docs/changelog/112337.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112337 -summary: Add workaround for missing shard gen blob -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/112341.yaml b/docs/changelog/112341.yaml deleted file mode 100644 index 8f44b53ad9998..0000000000000 --- a/docs/changelog/112341.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112341 -summary: Fix DLS using runtime fields and synthetic source -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/112345.yaml b/docs/changelog/112345.yaml deleted file mode 100644 index b922fe3754cbb..0000000000000 --- a/docs/changelog/112345.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 112345 -summary: Allow dimension fields to have multiple values in standard and logsdb index - mode -area: Mapping -type: enhancement -issues: - - 112232 - - 112239 diff --git a/docs/changelog/112348.yaml b/docs/changelog/112348.yaml deleted file mode 100644 index 84110a7cd4f1b..0000000000000 --- a/docs/changelog/112348.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112348 -summary: Introduce repository integrity verification API -area: Snapshot/Restore -type: enhancement -issues: - - 52622 diff --git a/docs/changelog/112350.yaml b/docs/changelog/112350.yaml deleted file mode 100644 index 994cd3a65c633..0000000000000 --- a/docs/changelog/112350.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112350 -summary: "[ESQL] Add `SPACE` function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112369.yaml b/docs/changelog/112369.yaml deleted file mode 100644 index fb1c4775f7a12..0000000000000 --- a/docs/changelog/112369.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112369 -summary: Register Task while Streaming -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112397.yaml b/docs/changelog/112397.yaml deleted file mode 100644 index e67478ec69b1c..0000000000000 --- a/docs/changelog/112397.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112397 -summary: Control storing array source with index setting -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/112401.yaml b/docs/changelog/112401.yaml deleted file mode 100644 index 65e9e76ac25f6..0000000000000 --- a/docs/changelog/112401.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112401 -summary: "ESQL: Fix CASE when conditions are multivalued" -area: ES|QL -type: bug -issues: - - 112359 diff --git a/docs/changelog/112405.yaml b/docs/changelog/112405.yaml deleted file mode 100644 index 4e9f095fb80a8..0000000000000 --- 
a/docs/changelog/112405.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112405 -summary: Improve date expression/remote handling in index names -area: Search -type: bug -issues: - - 112243 diff --git a/docs/changelog/112409.yaml b/docs/changelog/112409.yaml deleted file mode 100644 index bad94b9f5f2be..0000000000000 --- a/docs/changelog/112409.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112409 -summary: Include reason when no nodes are found -area: "Transform" -type: bug -issues: - - 112404 diff --git a/docs/changelog/112412.yaml b/docs/changelog/112412.yaml deleted file mode 100644 index fda53ebd1ade0..0000000000000 --- a/docs/changelog/112412.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112412 -summary: Expose `HexFormat` in Painless -area: Infra/Scripting -type: enhancement -issues: [] diff --git a/docs/changelog/112431.yaml b/docs/changelog/112431.yaml deleted file mode 100644 index b8c1197bdc7ef..0000000000000 --- a/docs/changelog/112431.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112431 -summary: "Async search: Add ID and \"is running\" http headers" -area: Search -type: feature -issues: - - 109576 diff --git a/docs/changelog/112440.yaml b/docs/changelog/112440.yaml deleted file mode 100644 index f208474fa2686..0000000000000 --- a/docs/changelog/112440.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112440 -summary: "logs-apm.error-*: define log.level field as keyword" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/112451.yaml b/docs/changelog/112451.yaml deleted file mode 100644 index aa852cf5e2a1a..0000000000000 --- a/docs/changelog/112451.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 112451 -summary: Update data stream lifecycle telemetry to track global retention -area: Data streams -type: breaking -issues: [] -breaking: - title: Update data stream lifecycle telemetry to track global retention - area: REST API - details: |- - In this release we introduced global retention settings that apply to data streams fulfilling the following criteria: - - - the data stream is managed by the data stream lifecycle, - - the data stream is not an internal data stream. - - As a result, we defined different types of retention: - - - **data retention**: the retention configured on the data stream level by the data stream user or owner - - **default global retention:** the retention configured by an admin on a cluster level and applied to any - data stream that doesn't have data retention and fulfils the criteria. - - **max global retention:** the retention configured by an admin to guard against having long retention periods. - Any data stream that fulfils the criteria will adhere to its data retention unless it exceeds the max retention, - in which case the max global retention applies. - - **effective retention:** the retention that applies to a data stream that fulfils the criteria at a given moment - in time. It takes into consideration all the retention types above and resolves them to the retention that will take effect. - - Considering the above changes, having a field named `retention` in the usage API was confusing. For this reason, we - renamed it to `data_retention` and added telemetry about the other configurations too.
- impact: Users that use the field `data_lifecycle.retention` should use `data_lifecycle.data_retention` instead. - notable: false diff --git a/docs/changelog/112481.yaml b/docs/changelog/112481.yaml deleted file mode 100644 index 3e539ce8e4b75..0000000000000 --- a/docs/changelog/112481.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112481 -summary: Validate streaming HTTP Response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112489.yaml b/docs/changelog/112489.yaml deleted file mode 100644 index ebc84927b0e76..0000000000000 --- a/docs/changelog/112489.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112489 -summary: "ES|QL: better validation for RLIKE patterns" -area: ES|QL -type: bug -issues: - - 112485 diff --git a/docs/changelog/112508.yaml b/docs/changelog/112508.yaml deleted file mode 100644 index 3945ebd226ac4..0000000000000 --- a/docs/changelog/112508.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112508 -summary: "[ML] Create Inference API will no longer return model_id and now only return inference_id" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/112512.yaml b/docs/changelog/112512.yaml deleted file mode 100644 index a9812784ccfca..0000000000000 --- a/docs/changelog/112512.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112512 -summary: Add Completion Inference API for Alibaba Cloud AI Search Model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112519.yaml b/docs/changelog/112519.yaml deleted file mode 100644 index aa8a942ef0f58..0000000000000 --- a/docs/changelog/112519.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112519 -summary: Lower the memory footprint when creating `DelayedBucket` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112547.yaml b/docs/changelog/112547.yaml deleted file mode 100644 index 7f42f2a82976e..0000000000000 --- a/docs/changelog/112547.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112547 -summary: Remove reduce and `reduceContext` from `DelayedBucket` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112565.yaml b/docs/changelog/112565.yaml deleted file mode 100644 index be9ec41419a09..0000000000000 --- a/docs/changelog/112565.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112565 -summary: Server-Sent Events for Inference response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112571.yaml b/docs/changelog/112571.yaml deleted file mode 100644 index f1be2e5c291de..0000000000000 --- a/docs/changelog/112571.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 112571 -summary: Deprecate dot-prefixed indices and composable template index patterns -area: CRUD -type: deprecation -issues: [] -deprecation: - title: Deprecate dot-prefixed indices and composable template index patterns - area: CRUD - details: "Indices beginning with a dot '.' are reserved for system and internal\ - \ indices, and should not be used by an end-user. Additionally, composable index\ - \ templates that contain patterns for dot-prefixed indices should also be avoided,\ - \ as these patterns are meant for internal use only. In a future Elasticsearch\ - \ version, creation of these dot-prefixed indices will no longer be allowed." - impact: "Requests performing an action that would create an index beginning with\ - \ a dot (indexing a document, manual creation, reindex), or creating an index\ - \ template with index patterns beginning with a dot, will contain a deprecation\ - \ header warning about dot-prefixed indices in the response."
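As a hypothetical sketch of the `RLIKE` validation entry above (PR 112489; the `employees` index and field names are invented), a malformed pattern such as an unclosed group is now rejected up front during validation, while a well-formed query looks like this:

[source,esql]
----
FROM employees
| WHERE first_name RLIKE "Geor.*"
| KEEP first_name, last_name
----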
diff --git a/docs/changelog/112574.yaml b/docs/changelog/112574.yaml deleted file mode 100644 index 3111697a8b97f..0000000000000 --- a/docs/changelog/112574.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112574 -summary: Add privileges required for CDR misconfiguration features to work on AWS SecurityHub integration -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/112595.yaml b/docs/changelog/112595.yaml deleted file mode 100644 index 19ee0368475ae..0000000000000 --- a/docs/changelog/112595.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112595 -summary: Collect and display execution metadata for ES|QL cross cluster searches -area: ES|QL -type: enhancement -issues: - - 112402 diff --git a/docs/changelog/112612.yaml b/docs/changelog/112612.yaml deleted file mode 100644 index d6037e34ff171..0000000000000 --- a/docs/changelog/112612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112612 -summary: Set `replica_unassigned_buffer_time` in constructor -area: Health -type: bug -issues: [] diff --git a/docs/changelog/112645.yaml b/docs/changelog/112645.yaml deleted file mode 100644 index cf4ef4609a1f3..0000000000000 --- a/docs/changelog/112645.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112645 -summary: Add support for multi-value dimensions -area: Mapping -type: enhancement -issues: - - 110387 diff --git a/docs/changelog/112652.yaml b/docs/changelog/112652.yaml deleted file mode 100644 index c7ddcd4bffdc8..0000000000000 --- a/docs/changelog/112652.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 110399 -summary: "[Inference API] alibabacloud ai search service support chunk infer to support semantic_text field" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112665.yaml b/docs/changelog/112665.yaml deleted file mode 100644 index ae2cf7f171f4b..0000000000000 --- a/docs/changelog/112665.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 112665 -summary: Remove zstd feature flag for index codec best compression -area: Codec -type: enhancement -issues: [] -highlight: - title: Enable ZStandard compression for indices with index.codec set to best_compression - body: |- - Previously, DEFLATE was used to compress stored fields in indices with the index.codec index setting set to - best_compression; with this change, ZStandard is used as the compression algorithm for stored fields in those - indices. Using ZStandard results in less storage usage with similar indexing throughput, depending on what - options are used. Experiments with indexing logs have shown that ZStandard offers ~12% lower storage usage and - a ~14% higher indexing throughput compared to DEFLATE.
- notable: true diff --git a/docs/changelog/112677.yaml b/docs/changelog/112677.yaml deleted file mode 100644 index 89662236c6ca5..0000000000000 --- a/docs/changelog/112677.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112677 -summary: Stream OpenAI Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/112678.yaml b/docs/changelog/112678.yaml deleted file mode 100644 index 7a1a9d622a65f..0000000000000 --- a/docs/changelog/112678.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112678 -summary: Make "too many clauses" throw IllegalArgumentException to avoid 500s -area: Search -type: bug -issues: - - 112177 \ No newline at end of file diff --git a/docs/changelog/112687.yaml b/docs/changelog/112687.yaml deleted file mode 100644 index dd079e1b700c4..0000000000000 --- a/docs/changelog/112687.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112687 -summary: Add `TaskManager` to `pluginServices` -area: Infra/Metrics -type: enhancement -issues: [] diff --git a/docs/changelog/112706.yaml b/docs/changelog/112706.yaml deleted file mode 100644 index fc0f5c4c554a1..0000000000000 --- a/docs/changelog/112706.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112706 -summary: Configure keeping source in `FieldMapper` -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/112707.yaml b/docs/changelog/112707.yaml deleted file mode 100644 index 9f16cfcd2b6f2..0000000000000 --- a/docs/changelog/112707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112707 -summary: Deduplicate `BucketOrder` when deserializing -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112723.yaml b/docs/changelog/112723.yaml deleted file mode 100644 index dbee3232d1c75..0000000000000 --- a/docs/changelog/112723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112723 -summary: Improve DateTime error handling and add some bad date tests -area: Search -type: bug -issues: - - 112190 diff --git a/docs/changelog/112768.yaml b/docs/changelog/112768.yaml deleted file mode 100644 index 13d5b8eaae38f..0000000000000 --- a/docs/changelog/112768.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112768 -summary: Deduplicate Kuromoji User Dictionary -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/112826.yaml b/docs/changelog/112826.yaml deleted file mode 100644 index 65c05b4d6035a..0000000000000 --- a/docs/changelog/112826.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112826 -summary: "Multi term intervals: increase max_expansions" -area: Search -type: enhancement -issues: - - 110491 diff --git a/docs/changelog/112850.yaml b/docs/changelog/112850.yaml deleted file mode 100644 index 97a8877f6291c..0000000000000 --- a/docs/changelog/112850.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112850 -summary: Fix synthetic source field names for multi-fields -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/112874.yaml b/docs/changelog/112874.yaml deleted file mode 100644 index 99ed9ed28fa0f..0000000000000 --- a/docs/changelog/112874.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112874 -summary: Reduce heap usage for `AggregatorsReducer` -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/112888.yaml b/docs/changelog/112888.yaml deleted file mode 100644 index 48806a491e531..0000000000000 --- a/docs/changelog/112888.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112888 -summary: Fix `getDatabaseType` for unusual MMDBs -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/112895.yaml b/docs/changelog/112895.yaml deleted file mode 100644 index 59d391f649280..0000000000000 --- 
a/docs/changelog/112895.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112895 -summary: (logger) change from error to warn for short circuiting user -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/112905.yaml b/docs/changelog/112905.yaml deleted file mode 100644 index aac0b7e9dfb59..0000000000000 --- a/docs/changelog/112905.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112905 -summary: "[ES|QL] Named parameter for field names and field name patterns" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112916.yaml b/docs/changelog/112916.yaml deleted file mode 100644 index 91dc7f332efc4..0000000000000 --- a/docs/changelog/112916.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112916 -summary: Allow out of range term queries for numeric types -area: Search -type: bug -issues: [] diff --git a/docs/changelog/112929.yaml b/docs/changelog/112929.yaml deleted file mode 100644 index e5f49897432de..0000000000000 --- a/docs/changelog/112929.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112929 -summary: "ES|QL: Add support for cached strings in plan serialization" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/112933.yaml b/docs/changelog/112933.yaml deleted file mode 100644 index 222cd5aadf739..0000000000000 --- a/docs/changelog/112933.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112933 -summary: "Allow incubating Panama Vector in simdvec, and add vectorized `ipByteBin`" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/112938.yaml b/docs/changelog/112938.yaml deleted file mode 100644 index 82b98871c3352..0000000000000 --- a/docs/changelog/112938.yaml +++ /dev/null @@ -1,35 +0,0 @@ -pr: 112938 -summary: Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function -area: ES|QL -type: enhancement -issues: - - 109973 -highlight: - title: Enhance SORT push-down to Lucene to cover references to fields and ST_DISTANCE function - body: |- - The most used and likely most valuable geospatial search query in Elasticsearch is the sorted proximity search, - finding items within a certain distance of a point of interest and sorting the results by distance. - This has been possible in ES|QL since 8.15.0, but the sorting was done in-memory, not pushed down to Lucene. - Now the sorting is pushed down to Lucene, which results in a significant performance improvement. - - Queries that perform both filtering and sorting on distance are supported. 
For example: - - [source,esql] - ---- - FROM test - | EVAL distance = ST_DISTANCE(location, TO_GEOPOINT("POINT(37.7749, -122.4194)")) - | WHERE distance < 1000000 - | SORT distance ASC, name DESC - | LIMIT 10 - ---- - - In addition, the support for sorting on EVAL expressions has been extended to cover references to fields: - - [source,esql] - ---- - FROM test - | EVAL ref = field - | SORT ref ASC - | LIMIT 10 - ---- - notable: false diff --git a/docs/changelog/112972.yaml b/docs/changelog/112972.yaml deleted file mode 100644 index 5332ac13fd13f..0000000000000 --- a/docs/changelog/112972.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 112972 -summary: "ILM: Add `total_shards_per_node` setting to searchable snapshot" -area: ILM+SLM -type: enhancement -issues: - - 112261 diff --git a/docs/changelog/112973.yaml b/docs/changelog/112973.yaml deleted file mode 100644 index 3ba86a31334ff..0000000000000 --- a/docs/changelog/112973.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112973 -summary: Fix verbose get data stream API not requiring extra privileges -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/113013.yaml b/docs/changelog/113013.yaml deleted file mode 100644 index 1cec31074e806..0000000000000 --- a/docs/changelog/113013.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113013 -summary: Account for `DelayedBucket` before reduction -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/113027.yaml b/docs/changelog/113027.yaml deleted file mode 100644 index 825740cf5691d..0000000000000 --- a/docs/changelog/113027.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113027 -summary: Retrieve the source for objects and arrays in a separate parsing phase -area: Mapping -type: bug -issues: - - 112374 diff --git a/docs/changelog/113051.yaml b/docs/changelog/113051.yaml deleted file mode 100644 index 9be68f9f2b03e..0000000000000 --- a/docs/changelog/113051.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113051 -summary: Add Search Inference ID To Semantic Text Mapping -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/113103.yaml b/docs/changelog/113103.yaml deleted file mode 100644 index 2ed98e0907bae..0000000000000 --- a/docs/changelog/113103.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113103 -summary: "ESQL: Align year diffing to the rest of the units in DATE_DIFF: chronological" -area: ES|QL -type: bug -issues: - - 112482 diff --git a/docs/changelog/113143.yaml b/docs/changelog/113143.yaml deleted file mode 100644 index 4a2044cca0ce4..0000000000000 --- a/docs/changelog/113143.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 113143 -summary: Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 -area: Analysis -type: deprecation -issues: [] -deprecation: - title: Deprecate dutch_kp and lovins stemmer as they are removed in Lucene 10 - area: Analysis - details: kp, dutch_kp, dutchKp and lovins stemmers are deprecated and will be removed. - impact: These stemmers will be removed and will be no longer supported. diff --git a/docs/changelog/113158.yaml b/docs/changelog/113158.yaml deleted file mode 100644 index d097ea11b3a23..0000000000000 --- a/docs/changelog/113158.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113158 -summary: Adds a new Inference API for streaming responses back to the user. 
-area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113172.yaml b/docs/changelog/113172.yaml deleted file mode 100644 index 2d03196b0cfbd..0000000000000 --- a/docs/changelog/113172.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113172 -summary: "[ESQL] Add finish() elapsed time to aggregation profiling times" -area: ES|QL -type: enhancement -issues: - - 112950 diff --git a/docs/changelog/113183.yaml b/docs/changelog/113183.yaml deleted file mode 100644 index f30ce9831adb3..0000000000000 --- a/docs/changelog/113183.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113183 -summary: "ESQL: TOP support for strings" -area: ES|QL -type: feature -issues: - - 109849 diff --git a/docs/changelog/113187.yaml b/docs/changelog/113187.yaml deleted file mode 100644 index 397179c4bc3bb..0000000000000 --- a/docs/changelog/113187.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113187 -summary: Preserve Step Info Across ILM Auto Retries -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/113251.yaml b/docs/changelog/113251.yaml deleted file mode 100644 index 49167e6e4c915..0000000000000 --- a/docs/changelog/113251.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113251 -summary: Span term query to convert to match no docs when unmapped field is targeted -area: Search -type: bug -issues: [] diff --git a/docs/changelog/113276.yaml b/docs/changelog/113276.yaml deleted file mode 100644 index 87241878b3ec4..0000000000000 --- a/docs/changelog/113276.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113276 -summary: Adding component template substitutions to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113280.yaml b/docs/changelog/113280.yaml deleted file mode 100644 index 1d8de0d87dd0d..0000000000000 --- a/docs/changelog/113280.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113280 -summary: Warn for model load failures if they have a status code <500 -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/113286.yaml b/docs/changelog/113286.yaml deleted file mode 100644 index eeffb10b4e638..0000000000000 --- a/docs/changelog/113286.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 113286 -summary: Deprecate legacy params from range query -area: Search -type: deprecation -issues: [] -deprecation: - title: Deprecate legacy params from range query - area: REST API - details: Range query will not longer accept `to`, `from`, `include_lower`, and `include_upper` parameters. - impact: Instead use `gt`, `gte`, `lt` and `lte` parameters. 
diff --git a/docs/changelog/113297.yaml b/docs/changelog/113297.yaml deleted file mode 100644 index 476619f432639..0000000000000 --- a/docs/changelog/113297.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113297 -summary: "[ES|QL] add reverse function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/113314.yaml b/docs/changelog/113314.yaml deleted file mode 100644 index c496ad3dd86f1..0000000000000 --- a/docs/changelog/113314.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113314 -summary: "[ES|QL] Check expression resolved before checking its data type in `ImplicitCasting`" -area: ES|QL -type: bug -issues: - - 113242 diff --git a/docs/changelog/113333.yaml b/docs/changelog/113333.yaml deleted file mode 100644 index c6a3584845729..0000000000000 --- a/docs/changelog/113333.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113333 -summary: Upgrade to Lucene 9.12 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/113373.yaml b/docs/changelog/113373.yaml deleted file mode 100644 index cbb3829e03425..0000000000000 --- a/docs/changelog/113373.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113373 -summary: Implement `parseBytesRef` for `TimeSeriesRoutingHashFieldType` -area: TSDB -type: bug -issues: - - 112399 diff --git a/docs/changelog/113374.yaml b/docs/changelog/113374.yaml deleted file mode 100644 index f1d5750de0f60..0000000000000 --- a/docs/changelog/113374.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113374 -summary: Add ESQL match function -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/113385.yaml b/docs/changelog/113385.yaml deleted file mode 100644 index 9cee1ebcd4f64..0000000000000 --- a/docs/changelog/113385.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113385 -summary: Small performance improvement in h3 library -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/113387.yaml b/docs/changelog/113387.yaml deleted file mode 100644 index 4819404a55809..0000000000000 --- a/docs/changelog/113387.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113387 -summary: "Add `CircuitBreaker` to TDigest, Step 3: Connect with ESQL CB" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/113498.yaml b/docs/changelog/113498.yaml deleted file mode 100644 index 93b21a1d171eb..0000000000000 --- a/docs/changelog/113498.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113498 -summary: Listing all available databases in the _ingest/geoip/database API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113499.yaml b/docs/changelog/113499.yaml deleted file mode 100644 index a4d7f28eb0de4..0000000000000 --- a/docs/changelog/113499.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113499 -summary: Fix synthetic source for flattened field when used with `ignore_above` -area: Logs -type: bug -issues: - - 112044 diff --git a/docs/changelog/113552.yaml b/docs/changelog/113552.yaml deleted file mode 100644 index 48f7da309e82e..0000000000000 --- a/docs/changelog/113552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113552 -summary: Tag redacted document in ingest metadata -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/113570.yaml b/docs/changelog/113570.yaml deleted file mode 100644 index 8cfad9195c5cd..0000000000000 --- a/docs/changelog/113570.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 113570 -summary: Fix `ignore_above` handling in synthetic source when index level setting - is used -area: Logs -type: bug -issues: - - 113538 diff --git a/docs/changelog/113588.yaml b/docs/changelog/113588.yaml deleted file mode 100644 index 
e797100443f54..0000000000000 --- a/docs/changelog/113588.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113588 -summary: Add asset criticality indices for `kibana_system_user` -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/113607.yaml b/docs/changelog/113607.yaml deleted file mode 100644 index eb25d2600a555..0000000000000 --- a/docs/changelog/113607.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113607 -summary: Add more `dense_vector` details for cluster stats field stats -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/113613.yaml b/docs/changelog/113613.yaml deleted file mode 100644 index 4b020333aaa36..0000000000000 --- a/docs/changelog/113613.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 113613 -summary: "Add `CircuitBreaker` to TDigest, Step 4: Take into account shallow classes\ - \ size" -area: ES|QL -type: enhancement -issues: - - 113916 diff --git a/docs/changelog/113623.yaml b/docs/changelog/113623.yaml deleted file mode 100644 index 8587687d27080..0000000000000 --- a/docs/changelog/113623.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113623 -summary: "Adding chunking settings to `MistralService,` `GoogleAiStudioService,` and\ - \ `HuggingFaceService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113690.yaml b/docs/changelog/113690.yaml deleted file mode 100644 index bd5f1245f471e..0000000000000 --- a/docs/changelog/113690.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113690 -summary: Add object param for keeping synthetic source -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/113735.yaml b/docs/changelog/113735.yaml deleted file mode 100644 index 4f6579c7cb9e0..0000000000000 --- a/docs/changelog/113735.yaml +++ /dev/null @@ -1,28 +0,0 @@ -pr: 113735 -summary: "ESQL: Introduce per agg filter" -area: ES|QL -type: feature -issues: [] -highlight: - title: "ESQL: Introduce per agg filter" - body: |- - Add support for aggregation scoped filters that work dynamically on the - data in each group. - - [source,esql] - ---- - | STATS success = COUNT(*) WHERE 200 <= code AND code < 300, - redirect = COUNT(*) WHERE 300 <= code AND code < 400, - client_err = COUNT(*) WHERE 400 <= code AND code < 500, - server_err = COUNT(*) WHERE 500 <= code AND code < 600, - total_count = COUNT(*) - ---- - - Implementation wise, the base AggregateFunction has been extended to - allow a filter to be passed on. This is required to incorporate the - filter as part of the aggregate equality/identity which would fail with - the filter as an external component. - As part of the process, the serialization for the existing aggregations - had to be fixed so AggregateFunction implementations so that it - delegates to their parent first. 
- notable: true diff --git a/docs/changelog/113812.yaml b/docs/changelog/113812.yaml deleted file mode 100644 index 04498b4ae5f7e..0000000000000 --- a/docs/changelog/113812.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113812 -summary: Add Streaming Inference spec -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113816.yaml b/docs/changelog/113816.yaml deleted file mode 100644 index 8c7cf14e356b3..0000000000000 --- a/docs/changelog/113816.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113816 -summary: Avoid using concurrent collector manager in `LuceneChangesSnapshot` -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/113825.yaml b/docs/changelog/113825.yaml deleted file mode 100644 index 6d4090fda7ed2..0000000000000 --- a/docs/changelog/113825.yaml +++ /dev/null @@ -1,12 +0,0 @@ -pr: 113825 -summary: Cross-cluster search telemetry -area: Search -type: feature -issues: [] -highlight: - title: Cross-cluster search telemetry - body: |- - The cross-cluster search telemetry is collected when cross-cluster searches - are performed, and is returned as "ccs" field in `_cluster/stats` output. - It also add a new parameter `include_remotes=true` to the `_cluster/stats` API - which will collect data from connected remote clusters. diff --git a/docs/changelog/113873.yaml b/docs/changelog/113873.yaml deleted file mode 100644 index ac52aaf94d518..0000000000000 --- a/docs/changelog/113873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113873 -summary: Default inference endpoint for ELSER -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113897.yaml b/docs/changelog/113897.yaml deleted file mode 100644 index db0c53518613c..0000000000000 --- a/docs/changelog/113897.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113897 -summary: "Add chunking settings configuration to `CohereService,` `AmazonBedrockService,`\ - \ and `AzureOpenAiService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113910.yaml b/docs/changelog/113910.yaml deleted file mode 100644 index aa9d3b61fe768..0000000000000 --- a/docs/changelog/113910.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113910 -summary: Do not expand dots when storing objects in ignored source -area: Logs -type: bug -issues: [] diff --git a/docs/changelog/113911.yaml b/docs/changelog/113911.yaml deleted file mode 100644 index 5c2f93a6ea76a..0000000000000 --- a/docs/changelog/113911.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113911 -summary: Enable OpenAI Streaming -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113967.yaml b/docs/changelog/113967.yaml deleted file mode 100644 index 58b72eba49deb..0000000000000 --- a/docs/changelog/113967.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 113967 -summary: "ESQL: Entirely remove META FUNCTIONS" -area: ES|QL -type: breaking -issues: [] -breaking: - title: "ESQL: Entirely remove META FUNCTIONS" - area: ES|QL - details: | - Removes an undocumented syntax from ESQL: META FUNCTION. This was never - reliable or really useful. Consult the documentation instead. 
- impact: "Removes an undocumented syntax from ESQL: META FUNCTION" - notable: false diff --git a/docs/changelog/113975.yaml b/docs/changelog/113975.yaml deleted file mode 100644 index 632ba038271bb..0000000000000 --- a/docs/changelog/113975.yaml +++ /dev/null @@ -1,19 +0,0 @@ -pr: 113975 -summary: JDK locale database change -area: Mapping -type: breaking -issues: [] -breaking: - title: JDK locale database change - area: Mapping - details: | - {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. - - If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. - impact: | - This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. - - Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. - - For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. - notable: true diff --git a/docs/changelog/113981.yaml b/docs/changelog/113981.yaml deleted file mode 100644 index 38f3a6f04ae46..0000000000000 --- a/docs/changelog/113981.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113981 -summary: "Adding chunking settings to `GoogleVertexAiService,` `AzureAiStudioService,`\ - \ and `AlibabaCloudSearchService`" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/113988.yaml b/docs/changelog/113988.yaml deleted file mode 100644 index d55e7eb2db326..0000000000000 --- a/docs/changelog/113988.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113988 -summary: Track search and fetch failure stats -area: Stats -type: enhancement -issues: [] diff --git a/docs/changelog/113989.yaml b/docs/changelog/113989.yaml deleted file mode 100644 index 7bf50b52d9e07..0000000000000 --- a/docs/changelog/113989.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 113989 -summary: Add `max_multipart_parts` setting to S3 repository -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/114021.yaml b/docs/changelog/114021.yaml deleted file mode 100644 index e9dab5dce5685..0000000000000 --- a/docs/changelog/114021.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114021 -summary: "ESQL: Speed up grouping by bytes" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114080.yaml b/docs/changelog/114080.yaml deleted file mode 100644 index 395768c46369a..0000000000000 --- a/docs/changelog/114080.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114080 -summary: Stream Cohere Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114109.yaml b/docs/changelog/114109.yaml deleted file mode 100644 index ce51ed50f724c..0000000000000 --- a/docs/changelog/114109.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114109 -summary: Update 
cluster stats for retrievers -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/114128.yaml b/docs/changelog/114128.yaml deleted file mode 100644 index 721649d0d6fe0..0000000000000 --- a/docs/changelog/114128.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114128 -summary: Adding `index_template_substitutions` to the simulate ingest API -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114157.yaml b/docs/changelog/114157.yaml deleted file mode 100644 index 22e0fda173e98..0000000000000 --- a/docs/changelog/114157.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114157 -summary: Add a `terminate` ingest processor -area: Ingest Node -type: feature -issues: - - 110218 diff --git a/docs/changelog/114168.yaml b/docs/changelog/114168.yaml deleted file mode 100644 index 58f1ab7110e7d..0000000000000 --- a/docs/changelog/114168.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114168 -summary: Add a query rules tester API call -area: Relevance -type: enhancement -issues: [] diff --git a/docs/changelog/114234.yaml b/docs/changelog/114234.yaml deleted file mode 100644 index 0f77ada794bee..0000000000000 --- a/docs/changelog/114234.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114234 -summary: Prevent flattening of ordered and unordered interval sources -area: Search -type: bug -issues: [] diff --git a/docs/changelog/114271.yaml b/docs/changelog/114271.yaml deleted file mode 100644 index 7b47b922ff811..0000000000000 --- a/docs/changelog/114271.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114271 -summary: "[ES|QL] Skip validating remote cluster index names in parser" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114295.yaml b/docs/changelog/114295.yaml deleted file mode 100644 index 2acdc293a206c..0000000000000 --- a/docs/changelog/114295.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114295 -summary: "Reprocess operator file settings when settings service starts, due to node restart or master node change" -area: Infra/Settings -type: enhancement -issues: [ ] diff --git a/docs/changelog/114309.yaml b/docs/changelog/114309.yaml deleted file mode 100644 index bcd1262062943..0000000000000 --- a/docs/changelog/114309.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114309 -summary: Upgrade to AWS SDK v2 -area: Machine Learning -type: enhancement -issues: - - 110590 diff --git a/docs/changelog/114321.yaml b/docs/changelog/114321.yaml deleted file mode 100644 index 286a72cfee840..0000000000000 --- a/docs/changelog/114321.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114321 -summary: Stream Anthropic Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114358.yaml b/docs/changelog/114358.yaml deleted file mode 100644 index 972bc5bfdbe1c..0000000000000 --- a/docs/changelog/114358.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114358 -summary: "ESQL: Use less memory in listener" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114363.yaml b/docs/changelog/114363.yaml deleted file mode 100644 index 51ca9ed34a7ca..0000000000000 --- a/docs/changelog/114363.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114363 -summary: Give the kibana system user permission to read security entities -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/114368.yaml b/docs/changelog/114368.yaml deleted file mode 100644 index 6c6e215a1bd49..0000000000000 --- a/docs/changelog/114368.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114368 -summary: "ESQL: Delay construction of warnings" -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/114375.yaml 
b/docs/changelog/114375.yaml deleted file mode 100644 index 7ff7cc60b34ba..0000000000000 --- a/docs/changelog/114375.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114375 -summary: Handle `InternalSendException` inline for non-forking handlers -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/114382.yaml b/docs/changelog/114382.yaml deleted file mode 100644 index 9f572e14f4737..0000000000000 --- a/docs/changelog/114382.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114382 -summary: "[ES|QL] Add hypot function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114386.yaml b/docs/changelog/114386.yaml deleted file mode 100644 index cf9edda9de21e..0000000000000 --- a/docs/changelog/114386.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114386 -summary: Improve handling of failure to create persistent task -area: Task Management -type: bug -issues: [] diff --git a/docs/changelog/114389.yaml b/docs/changelog/114389.yaml deleted file mode 100644 index f56b165bc917e..0000000000000 --- a/docs/changelog/114389.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114389 -summary: Filter empty task settings objects from the API response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114411.yaml b/docs/changelog/114411.yaml deleted file mode 100644 index 23bff3c8e25ba..0000000000000 --- a/docs/changelog/114411.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114411 -summary: "ESQL: Push down filters even in case of renames in Evals" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114429.yaml b/docs/changelog/114429.yaml deleted file mode 100644 index 56b0ffe7b43fb..0000000000000 --- a/docs/changelog/114429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114429 -summary: Add chunking settings configuration to `ElasticsearchService/ELSER` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114439.yaml b/docs/changelog/114439.yaml deleted file mode 100644 index fd097d02f885f..0000000000000 --- a/docs/changelog/114439.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114439 -summary: Adding new bbq index types behind a feature flag -area: Vector Search -type: feature -issues: [] diff --git a/docs/changelog/114453.yaml b/docs/changelog/114453.yaml deleted file mode 100644 index 0d5345ad9d2a6..0000000000000 --- a/docs/changelog/114453.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114453 -summary: Switch default chunking strategy to sentence -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114457.yaml b/docs/changelog/114457.yaml deleted file mode 100644 index 9558c41852f69..0000000000000 --- a/docs/changelog/114457.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114457 -summary: "[Inference API] Introduce Update API to change some aspects of existing\ - \ inference endpoints" -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114464.yaml b/docs/changelog/114464.yaml deleted file mode 100644 index 5f5ee816aa28d..0000000000000 --- a/docs/changelog/114464.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114464 -summary: Stream Azure Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114512.yaml b/docs/changelog/114512.yaml deleted file mode 100644 index 10dea3a2cbac1..0000000000000 --- a/docs/changelog/114512.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114512 -summary: Ensure clean thread context in `MasterService` -area: Cluster Coordination -type: bug -issues: [] diff --git a/docs/changelog/114527.yaml b/docs/changelog/114527.yaml deleted file mode 100644 
index 74d95edcd1a1d..0000000000000 --- a/docs/changelog/114527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114527 -summary: Verify Maxmind database types in the geoip processor -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/114549.yaml b/docs/changelog/114549.yaml deleted file mode 100644 index a6bdbba93876b..0000000000000 --- a/docs/changelog/114549.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114549 -summary: Send mid-stream errors to users -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/114552.yaml b/docs/changelog/114552.yaml deleted file mode 100644 index 00e2f95b5038d..0000000000000 --- a/docs/changelog/114552.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114552 -summary: Improve exception message for bad environment variable placeholders in settings -area: Infra/Settings -type: enhancement -issues: [110858] diff --git a/docs/changelog/114596.yaml b/docs/changelog/114596.yaml deleted file mode 100644 index a36978dcacd8c..0000000000000 --- a/docs/changelog/114596.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114596 -summary: Stream Google Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114638.yaml b/docs/changelog/114638.yaml deleted file mode 100644 index 0386aacfe3e18..0000000000000 --- a/docs/changelog/114638.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 114638 -summary: "ES|QL: Restrict sorting for `_source` and counter field types" -area: ES|QL -type: bug -issues: - - 114423 - - 111976 diff --git a/docs/changelog/114683.yaml b/docs/changelog/114683.yaml deleted file mode 100644 index a677e65a12b0e..0000000000000 --- a/docs/changelog/114683.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114683 -summary: Default inference endpoint for the multilingual-e5-small model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114715.yaml b/docs/changelog/114715.yaml deleted file mode 100644 index 0894cb2fa42ca..0000000000000 --- a/docs/changelog/114715.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114715 -summary: Ignore unrecognized openai sse fields -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/114719.yaml b/docs/changelog/114719.yaml deleted file mode 100644 index 477d656d5b979..0000000000000 --- a/docs/changelog/114719.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114719 -summary: Wait for allocation on scale up -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114732.yaml b/docs/changelog/114732.yaml deleted file mode 100644 index 42176cdbda443..0000000000000 --- a/docs/changelog/114732.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114732 -summary: Stream Bedrock Completion -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114750.yaml b/docs/changelog/114750.yaml deleted file mode 100644 index f7a3c8c283934..0000000000000 --- a/docs/changelog/114750.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114750 -summary: Create an ml node inference endpoint referencing an existing model -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/114774.yaml b/docs/changelog/114774.yaml deleted file mode 100644 index 1becfe427fda0..0000000000000 --- a/docs/changelog/114774.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114774 -summary: "ESQL: Add support for multivalue fields in Arrow output" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/114784.yaml b/docs/changelog/114784.yaml deleted file mode 100644 index 24ebe8b5fc09a..0000000000000 --- a/docs/changelog/114784.yaml +++ /dev/null @@ 
-1,5 +0,0 @@ -pr: 114784 -summary: "[ES|QL] make named parameter for identifier and pattern snapshot" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114836.yaml b/docs/changelog/114836.yaml deleted file mode 100644 index 6f21d3bfb9327..0000000000000 --- a/docs/changelog/114836.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114836 -summary: Support multi-valued fields in compute engine for ST_DISTANCE -area: ES|QL -type: enhancement -issues: - - 112910 diff --git a/docs/changelog/114848.yaml b/docs/changelog/114848.yaml deleted file mode 100644 index db41e8496f787..0000000000000 --- a/docs/changelog/114848.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114848 -summary: "ESQL: Fix grammar changes around per agg filtering" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/114854.yaml b/docs/changelog/114854.yaml deleted file mode 100644 index 144a10ba85043..0000000000000 --- a/docs/changelog/114854.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 114854 -summary: Adding deprecation warnings for rrf using rank and `sub_searches` -area: Search -type: deprecation -issues: [] -deprecation: - title: Adding deprecation warnings for rrf using rank and `sub_searches` - area: REST API - details: Search API parameter `sub_searches` will no longer be a supported and will be removed in future releases. Similarly, `rrf` can only be used through the specified `retriever` and no longer though the `rank` parameter - impact: Requests specifying rrf through `rank` and/or `sub_searches` elements will be disallowed in a future version. Users should instead utilize the new `retriever` parameter. diff --git a/docs/changelog/114856.yaml b/docs/changelog/114856.yaml deleted file mode 100644 index da7fae3ee18ea..0000000000000 --- a/docs/changelog/114856.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114856 -summary: "OTel mappings: avoid metrics to be rejected when attributes are malformed" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/114888.yaml b/docs/changelog/114888.yaml deleted file mode 100644 index 6b99eb82d10f3..0000000000000 --- a/docs/changelog/114888.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114888 -summary: Fix ST_CENTROID_AGG when no records are aggregated -area: ES|QL -type: bug -issues: - - 106025 diff --git a/docs/changelog/114951.yaml b/docs/changelog/114951.yaml deleted file mode 100644 index 4d40a063e2b02..0000000000000 --- a/docs/changelog/114951.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114951 -summary: Expose cluster-state role mappings in APIs -area: Authentication -type: bug -issues: [] diff --git a/docs/changelog/114990.yaml b/docs/changelog/114990.yaml deleted file mode 100644 index 2575942d15bf5..0000000000000 --- a/docs/changelog/114990.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114990 -summary: Allow for querries on `_tier` to skip shards in the `can_match` phase -area: Search -type: bug -issues: - - 114910 diff --git a/docs/changelog/115031.yaml b/docs/changelog/115031.yaml deleted file mode 100644 index d8d6e1a3f8166..0000000000000 --- a/docs/changelog/115031.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115031 -summary: Bool query early termination should also consider `must_not` clauses -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/115048.yaml b/docs/changelog/115048.yaml deleted file mode 100644 index 10844b83c6d01..0000000000000 --- a/docs/changelog/115048.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115048 -summary: Add timeout and cancellation check to rescore phase -area: Ranking -type: enhancement -issues: [] diff --git 
a/docs/changelog/115061.yaml b/docs/changelog/115061.yaml deleted file mode 100644 index 7d40d5ae2629e..0000000000000 --- a/docs/changelog/115061.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115061 -summary: "[ES|QL] Simplify syntax of named parameter for identifier and pattern" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/115117.yaml b/docs/changelog/115117.yaml deleted file mode 100644 index de2defcd46afd..0000000000000 --- a/docs/changelog/115117.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115117 -summary: Report JVM stats for all memory pools (97046) -area: Infra/Core -type: bug -issues: - - 97046 diff --git a/docs/changelog/115147.yaml b/docs/changelog/115147.yaml deleted file mode 100644 index 36f40bba1da17..0000000000000 --- a/docs/changelog/115147.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115147 -summary: Fix IPinfo geolocation schema -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/115194.yaml b/docs/changelog/115194.yaml deleted file mode 100644 index 0b201b9f89aa5..0000000000000 --- a/docs/changelog/115194.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 115194 -summary: Update APM Java Agent to support JDK 23 -area: Infra/Metrics -type: upgrade -issues: - - 115101 - - 115100 diff --git a/docs/changelog/115245.yaml b/docs/changelog/115245.yaml deleted file mode 100644 index 294328567c3aa..0000000000000 --- a/docs/changelog/115245.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 115245 -summary: "ESQL: Fix `REVERSE` with backspace character" -area: ES|QL -type: bug -issues: - - 114372 - - 115227 - - 115228 diff --git a/docs/changelog/115312.yaml b/docs/changelog/115312.yaml deleted file mode 100644 index acf6bbc69c36c..0000000000000 --- a/docs/changelog/115312.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115312 -summary: "ESQL: Fix filtered grouping on ords" -area: ES|QL -type: bug -issues: - - 114897 diff --git a/docs/changelog/115317.yaml b/docs/changelog/115317.yaml deleted file mode 100644 index 153f7a52f0674..0000000000000 --- a/docs/changelog/115317.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115317 -summary: Revert "Add `ResolvedExpression` wrapper" -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/115399.yaml b/docs/changelog/115399.yaml deleted file mode 100644 index 9f69657a5d167..0000000000000 --- a/docs/changelog/115399.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 115399 -summary: Adding breaking change entry for retrievers -area: Search -type: breaking -issues: [] -breaking: - title: Reworking RRF retriever to be evaluated during rewrite phase - area: REST API - details: |- - In this release (8.16), we have introduced major changes to the retrievers framework - and how they can be evaluated, focusing mainly on compound retrievers - like `rrf` and `text_similarity_reranker`, which allowed us to support full - composability (i.e. any retriever can be nested under any compound retriever), - as well as supporting additional search features like collapsing, explaining, - aggregations, and highlighting. - - To ensure consistency, and given that this rework is not available until 8.16, - `rrf` and `text_similarity_reranker` retriever queries would now - throw an exception in a mixed cluster scenario, where there are nodes - both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. - - As part of the rework, we have also removed the `_rank` property from - the responses of an `rrf` retriever. 
- impact: |- - - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario - with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. - - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field - will throw an exception - notable: false diff --git a/docs/changelog/115404.yaml b/docs/changelog/115404.yaml deleted file mode 100644 index e443b152955f3..0000000000000 --- a/docs/changelog/115404.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115404 -summary: Fix NPE in Get Deployment Stats -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115429.yaml b/docs/changelog/115429.yaml deleted file mode 100644 index ddf3c69183000..0000000000000 --- a/docs/changelog/115429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115429 -summary: "[otel-data] Add more kubernetes aliases" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/115594.yaml b/docs/changelog/115594.yaml deleted file mode 100644 index 91a6089dfb3ce..0000000000000 --- a/docs/changelog/115594.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115594 -summary: Update `BlobCacheBufferedIndexInput::readVLong` to correctly handle negative - long values -area: Search -type: bug -issues: [] diff --git a/docs/changelog/115624.yaml b/docs/changelog/115624.yaml deleted file mode 100644 index 1992ed65679ca..0000000000000 --- a/docs/changelog/115624.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 115624 -summary: "ES|QL: fix LIMIT pushdown past MV_EXPAND" -area: ES|QL -type: bug -issues: - - 102084 - - 102061 diff --git a/docs/changelog/115656.yaml b/docs/changelog/115656.yaml deleted file mode 100644 index 13b612b052fc1..0000000000000 --- a/docs/changelog/115656.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115656 -summary: Fix stream support for `TaskType.ANY` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115715.yaml b/docs/changelog/115715.yaml deleted file mode 100644 index 378f2c42e5e50..0000000000000 --- a/docs/changelog/115715.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115715 -summary: Avoid `catch (Throwable t)` in `AmazonBedrockStreamingChatProcessor` -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115811.yaml b/docs/changelog/115811.yaml deleted file mode 100644 index 292dc91ecb928..0000000000000 --- a/docs/changelog/115811.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115811 -summary: "Prohibit changes to index mode, source, and sort settings during restore" -area: Logs -type: bug -issues: [] diff --git a/docs/changelog/115823.yaml b/docs/changelog/115823.yaml deleted file mode 100644 index a6119e0fa56e4..0000000000000 --- a/docs/changelog/115823.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115823 -summary: Add ECK Role Mapping Cleanup -area: Security -type: bug -issues: [] diff --git a/docs/changelog/115868.yaml b/docs/changelog/115868.yaml deleted file mode 100644 index abe6a63c3a4d8..0000000000000 --- a/docs/changelog/115868.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115868 -summary: Forward bedrock connection errors to user -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115952.yaml b/docs/changelog/115952.yaml deleted file mode 100644 index ec57a639dc0ae..0000000000000 --- a/docs/changelog/115952.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115952 -summary: "ESQL: Fix a bug in VALUES agg" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/116015.yaml b/docs/changelog/116015.yaml deleted file mode 100644 index 
693fad639f2fa..0000000000000 --- a/docs/changelog/116015.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116015 -summary: Empty percentile results no longer throw no_such_element_exception in Anomaly Detection jobs -area: Machine Learning -type: bug -issues: - - 116013 diff --git a/docs/changelog/116086.yaml b/docs/changelog/116086.yaml deleted file mode 100644 index 73ad77d637a46..0000000000000 --- a/docs/changelog/116086.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116086 -summary: "ESQL: Fix DEBUG log of filter" -area: ES|QL -type: bug -issues: - - 116055 diff --git a/docs/changelog/116212.yaml b/docs/changelog/116212.yaml deleted file mode 100644 index 7c8756f4054cd..0000000000000 --- a/docs/changelog/116212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116212 -summary: Handle status code 0 in S3 CMU response -area: Snapshot/Restore -type: bug -issues: - - 102294 diff --git a/docs/changelog/116266.yaml b/docs/changelog/116266.yaml deleted file mode 100644 index 1fcc0c310962d..0000000000000 --- a/docs/changelog/116266.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116266 -summary: Align dot prefix validation with Serverless -area: Indices APIs -type: bug -issues: [] diff --git a/docs/changelog/116274.yaml b/docs/changelog/116274.yaml deleted file mode 100644 index 9d506c7725afd..0000000000000 --- a/docs/changelog/116274.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 116274 -summary: "[ES|QL] Verify aggregation filter's type is boolean to avoid `class_cast_exception`" -area: ES|QL -type: bug -issues: [] From f529e12abdcfff13895fdf7da67dfbcf71bb3160 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Tue, 12 Nov 2024 17:51:52 +0100 Subject: [PATCH 66/95] Unmutes elastic#116332 after backporting tests to v8.x (#116612) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ddd806d49ae5f..f2ca6e3d00424 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -192,9 +192,6 @@ tests: - class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests method: testBottomFieldSort issue: https://github.com/elastic/elasticsearch/issues/116249 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} - issue: https://github.com/elastic/elasticsearch/issues/116332 - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT method: testAllocationPreventedForRemoval issue: https://github.com/elastic/elasticsearch/issues/116363 From 828dff0017549c1f53b5cca8d9fb69bc91758f02 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Tue, 12 Nov 2024 11:47:40 -0600 Subject: [PATCH 67/95] Fix test failure (#116532) This commit fixes a test failure by using the latest known version instead of the latest version from a map. 
fixes: #116520 --- muted-tests.yml | 3 --- .../authz/permission/RemoteClusterPermissionsTests.java | 4 +++- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index f2ca6e3d00424..b8a3e5a568732 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -212,9 +212,6 @@ tests: - class: org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotsCanMatchOnCoordinatorIntegTests method: testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQueryingAnyNodeWhenTheyAreOutsideOfTheQueryRange issue: https://github.com/elastic/elasticsearch/issues/116523 -- class: org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionsTests - method: testCollapseAndRemoveUnsupportedPrivileges - issue: https://github.com/elastic/elasticsearch/issues/116520 - class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT method: testMatchAllQuery issue: https://github.com/elastic/elasticsearch/issues/116536 diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java index 2c31965009273..a39aff3a6137f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/RemoteClusterPermissionsTests.java @@ -131,7 +131,9 @@ public void testCollapseAndRemoveUnsupportedPrivileges() { // create random groups with random privileges for random clusters List randomGroups = generateRandomGroups(true); // replace a random value with one that is allowed - String singleValidPrivilege = randomFrom(RemoteClusterPermissions.allowedRemoteClusterPermissions.get(TransportVersion.current())); + String singleValidPrivilege = randomFrom( + RemoteClusterPermissions.allowedRemoteClusterPermissions.get(lastTransportVersionPermission) + ); groupPrivileges.get(0)[0] = singleValidPrivilege; for (int i = 0; i < randomGroups.size(); i++) { From 1bc5e33a446c5bd8cd7b26a2ad3642e5ccb54b0d Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Wed, 13 Nov 2024 08:47:16 +1100 Subject: [PATCH 68/95] Mute org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT testSettingsApplied #116694 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index b8a3e5a568732..53bbe4fbc1d22 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -233,6 +233,9 @@ tests: - class: org.elasticsearch.packaging.test.DockerTests method: test011SecurityEnabledStatus issue: https://github.com/elastic/elasticsearch/issues/116628 +- class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT + method: testSettingsApplied + issue: https://github.com/elastic/elasticsearch/issues/116694 # Examples: # From b4898c959f1470b7acf99c35ba714763d4f70521 Mon Sep 17 00:00:00 2001 From: Gal Lalouche Date: Wed, 13 Nov 2024 00:42:19 +0200 Subject: [PATCH 69/95] [ES|QL] Add support for BYTE_LENGTH scalar function (#116591) Also added documentation and examples for BIT_LENGTH and LENGTH regarding Unicode.
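A minimal sketch of the Unicode behavior this commit documents, with all three length functions side by side; the input string here is purely illustrative and not taken from the patch:

[source,esql]
----
ROW s = "München"
| EVAL chars = LENGTH(s), bytes = BYTE_LENGTH(s), bits = BIT_LENGTH(s)
----

Because `ü` encodes as two bytes in UTF-8, this should return `chars = 7`, `bytes = 8`, and `bits = 64`; the byte and bit lengths diverge from the character length exactly when multi-byte characters are present.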
--- docs/changelog/116591.yaml | 5 + .../functions/description/bit_length.asciidoc | 2 + .../description/byte_length.asciidoc | 7 + .../functions/description/length.asciidoc | 2 + .../functions/examples/byte_length.asciidoc | 13 + .../kibana/definition/bit_length.json | 38 +++ .../kibana/definition/byte_length.json | 38 +++ .../functions/kibana/definition/length.json | 3 +- .../esql/functions/kibana/docs/bit_length.md | 8 +- .../esql/functions/kibana/docs/byte_length.md | 14 + .../esql/functions/kibana/docs/length.md | 5 +- .../functions/layout/byte_length.asciidoc | 15 + .../functions/parameters/byte_length.asciidoc | 6 + .../esql/functions/signature/byte_length.svg | 1 + .../esql/functions/string-functions.asciidoc | 2 + .../esql/functions/types/byte_length.asciidoc | 10 + .../src/main/resources/docs.csv-spec | 59 ++-- .../src/main/resources/eval.csv-spec | 261 ++++++++++-------- .../scalar/string/ByteLengthEvaluator.java | 127 +++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 + .../function/EsqlFunctionRegistry.java | 2 + .../function/scalar/UnaryScalarFunction.java | 2 + .../function/scalar/string/BitLength.java | 1 + .../function/scalar/string/ByteLength.java | 92 ++++++ .../function/scalar/string/Length.java | 1 + .../string/ByteLengthSerializationTests.java | 19 ++ .../scalar/string/ByteLengthTests.java | 77 ++++++ .../xpack/esql/planner/EvalMapperTests.java | 2 + .../rest-api-spec/test/esql/60_usage.yml | 9 +- 29 files changed, 667 insertions(+), 159 deletions(-) create mode 100644 docs/changelog/116591.yaml create mode 100644 docs/reference/esql/functions/description/byte_length.asciidoc create mode 100644 docs/reference/esql/functions/examples/byte_length.asciidoc create mode 100644 docs/reference/esql/functions/kibana/definition/bit_length.json create mode 100644 docs/reference/esql/functions/kibana/definition/byte_length.json create mode 100644 docs/reference/esql/functions/kibana/docs/byte_length.md create mode 100644 docs/reference/esql/functions/layout/byte_length.asciidoc create mode 100644 docs/reference/esql/functions/parameters/byte_length.asciidoc create mode 100644 docs/reference/esql/functions/signature/byte_length.svg create mode 100644 docs/reference/esql/functions/types/byte_length.asciidoc create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLength.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthSerializationTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthTests.java diff --git a/docs/changelog/116591.yaml b/docs/changelog/116591.yaml new file mode 100644 index 0000000000000..60ef241e197b3 --- /dev/null +++ b/docs/changelog/116591.yaml @@ -0,0 +1,5 @@ +pr: 116591 +summary: "Add support for `BYTE_LENGTH` scalar function" +area: ES|QL +type: feature +issues: [] diff --git a/docs/reference/esql/functions/description/bit_length.asciidoc b/docs/reference/esql/functions/description/bit_length.asciidoc index 1aad47488802d..3a3dd80d2bb0f 100644 --- a/docs/reference/esql/functions/description/bit_length.asciidoc +++ b/docs/reference/esql/functions/description/bit_length.asciidoc @@ -3,3 +3,5 @@ *Description* Returns the bit length of a string. 
+ +NOTE: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/description/byte_length.asciidoc b/docs/reference/esql/functions/description/byte_length.asciidoc new file mode 100644 index 0000000000000..c2150806e09ac --- /dev/null +++ b/docs/reference/esql/functions/description/byte_length.asciidoc @@ -0,0 +1,7 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Returns the byte length of a string. + +NOTE: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/description/length.asciidoc b/docs/reference/esql/functions/description/length.asciidoc index bf976e3d6e507..91525fda0c086 100644 --- a/docs/reference/esql/functions/description/length.asciidoc +++ b/docs/reference/esql/functions/description/length.asciidoc @@ -3,3 +3,5 @@ *Description* Returns the character length of a string. + +NOTE: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/examples/byte_length.asciidoc b/docs/reference/esql/functions/examples/byte_length.asciidoc new file mode 100644 index 0000000000000..d6b557fcd2e76 --- /dev/null +++ b/docs/reference/esql/functions/examples/byte_length.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=byteLength] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/eval.csv-spec[tag=byteLength-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/bit_length.json b/docs/reference/esql/functions/kibana/definition/bit_length.json new file mode 100644 index 0000000000000..156a063984e4d --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/bit_length.json @@ -0,0 +1,38 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "bit_length", + "description" : "Returns the bit length of a string.", + "note" : "All strings are in UTF-8, so a single character can use multiple bytes.", + "signatures" : [ + { + "params" : [ + { + "name" : "string", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "string", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + } + ], + "examples" : [ + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length=LENGTH(city), fn_bit_length = BIT_LENGTH(city)" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/byte_length.json b/docs/reference/esql/functions/kibana/definition/byte_length.json new file mode 100644 index 0000000000000..c8280a572fc62 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/byte_length.json @@ -0,0 +1,38 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "byte_length", + "description" : "Returns the byte length of a string.", + "note" : "All strings are in UTF-8, so a single character can use multiple bytes.", + "signatures" : [ + { + "params" : [ + { + "name" : "string", + "type" : "keyword", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "string", + "type" : "text", + "optional" : false, + "description" : "String expression. If `null`, the function returns `null`." + } + ], + "variadic" : false, + "returnType" : "integer" + } + ], + "examples" : [ + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length=LENGTH(city), fn_byte_length = BYTE_LENGTH(city)" + ], + "preview" : false, + "snapshot_only" : false +} diff --git a/docs/reference/esql/functions/kibana/definition/length.json b/docs/reference/esql/functions/kibana/definition/length.json index 0da505cf5ffa7..9ea340ebf7420 100644 --- a/docs/reference/esql/functions/kibana/definition/length.json +++ b/docs/reference/esql/functions/kibana/definition/length.json @@ -3,6 +3,7 @@ "type" : "eval", "name" : "length", "description" : "Returns the character length of a string.", + "note" : "All strings are in UTF-8, so a single character can use multiple bytes.", "signatures" : [ { "params" : [ @@ -30,7 +31,7 @@ } ], "examples" : [ - "FROM employees\n| KEEP first_name, last_name\n| EVAL fn_length = LENGTH(first_name)" + "FROM airports\n| WHERE country == \"India\"\n| KEEP city\n| EVAL fn_length = LENGTH(city)" ], "preview" : false, "snapshot_only" : false diff --git a/docs/reference/esql/functions/kibana/docs/bit_length.md b/docs/reference/esql/functions/kibana/docs/bit_length.md index 22280febd7876..253b2cdb6a7c6 100644 --- a/docs/reference/esql/functions/kibana/docs/bit_length.md +++ b/docs/reference/esql/functions/kibana/docs/bit_length.md @@ -6,7 +6,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ Returns the bit length of a string. ``` -FROM employees -| KEEP first_name, last_name -| EVAL fn_bit_length = BIT_LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length=LENGTH(city), fn_bit_length = BIT_LENGTH(city) ``` +Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/kibana/docs/byte_length.md b/docs/reference/esql/functions/kibana/docs/byte_length.md new file mode 100644 index 0000000000000..20d96ce38400d --- /dev/null +++ b/docs/reference/esql/functions/kibana/docs/byte_length.md @@ -0,0 +1,14 @@ +<!-- +This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. +--> + +### BYTE_LENGTH Returns the byte length of a string. + +``` +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length=LENGTH(city), fn_byte_length = BYTE_LENGTH(city) ``` +Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/kibana/docs/length.md b/docs/reference/esql/functions/kibana/docs/length.md index 19e3533e0ddfb..ce7726d092bae 100644 --- a/docs/reference/esql/functions/kibana/docs/length.md +++ b/docs/reference/esql/functions/kibana/docs/length.md @@ -6,7 +6,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ Returns the character length of a string.
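The notes added above all hinge on the same point: `LENGTH`, `BYTE_LENGTH`, and `BIT_LENGTH` count the same string in different units. A minimal Java sketch of those semantics (an illustration only, not part of the patch; it reuses the `Agwār` value from the csv-spec tests further down, where `ā` is one character encoded as two UTF-8 bytes):

import java.nio.charset.StandardCharsets;

class LengthSemanticsSketch {
    public static void main(String[] args) {
        String city = "Agwār";
        int charLength = city.codePointCount(0, city.length());        // 5  -> what LENGTH reports
        int byteLength = city.getBytes(StandardCharsets.UTF_8).length; // 6  -> what BYTE_LENGTH reports
        int bitLength = byteLength * 8;                                // 48 -> what BIT_LENGTH reports
        System.out.println(charLength + " " + byteLength + " " + bitLength); // prints 5 6 48
    }
}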
``` -FROM employees -| KEEP first_name, last_name -| EVAL fn_length = LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city) ``` +Note: All strings are in UTF-8, so a single character can use multiple bytes. diff --git a/docs/reference/esql/functions/layout/byte_length.asciidoc b/docs/reference/esql/functions/layout/byte_length.asciidoc new file mode 100644 index 0000000000000..56dc341264e0f --- /dev/null +++ b/docs/reference/esql/functions/layout/byte_length.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-byte_length]] +=== `BYTE_LENGTH` + +*Syntax* + +[.text-center] +image::esql/functions/signature/byte_length.svg[Embedded,opts=inline] + +include::../parameters/byte_length.asciidoc[] +include::../description/byte_length.asciidoc[] +include::../types/byte_length.asciidoc[] +include::../examples/byte_length.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/byte_length.asciidoc b/docs/reference/esql/functions/parameters/byte_length.asciidoc new file mode 100644 index 0000000000000..7bb8c080ce4a1 --- /dev/null +++ b/docs/reference/esql/functions/parameters/byte_length.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`string`:: +String expression. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/signature/byte_length.svg b/docs/reference/esql/functions/signature/byte_length.svg new file mode 100644 index 0000000000000..d88821e46e926 --- /dev/null +++ b/docs/reference/esql/functions/signature/byte_length.svg @@ -0,0 +1 @@ +BYTE_LENGTH(string) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index 422860f0a7a1d..ce9636f5c5a3a 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -9,6 +9,7 @@ // tag::string_list[] * <<esql-bit_length>> +* <<esql-byte_length>> * <<esql-concat>> * <<esql-ends_with>> * <<esql-from_base64>> @@ -32,6 +33,7 @@ // end::string_list[] include::layout/bit_length.asciidoc[] +include::layout/byte_length.asciidoc[] include::layout/concat.asciidoc[] include::layout/ends_with.asciidoc[] include::layout/from_base64.asciidoc[] diff --git a/docs/reference/esql/functions/types/byte_length.asciidoc b/docs/reference/esql/functions/types/byte_length.asciidoc new file mode 100644 index 0000000000000..db5a48c7c4390 --- /dev/null +++ b/docs/reference/esql/functions/types/byte_length.asciidoc @@ -0,0 +1,10 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +string | result +keyword | integer +text | integer +|=== diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 14d811535aafd..a53777cff7c71 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -4,7 +4,7 @@ // the comments in whatever file the test already lives in. If you have to // write a new test to make an example in the docs then put it in whatever // file matches its "theme" best. Put it next to similar tests. Not here. - + // Also!
When Nik originally extracted examples from the docs to make them // testable he didn't spend a lot of time putting the docs into appropriate // files. He just made this one. He didn't put his toys away. We'd be better @@ -352,18 +352,18 @@ FROM employees // tag::case-result[] emp_no:integer | languages:integer| type:keyword -10001 | 2 |bilingual -10002 | 5 |polyglot -10003 | 4 |polyglot -10004 | 5 |polyglot -10005 | 1 |monolingual +10001 | 2 |bilingual +10002 | 5 |polyglot +10003 | 4 |polyglot +10004 | 5 |polyglot +10005 | 1 |monolingual // end::case-result[] ; docsCountAll // tag::countAll[] -FROM employees -| STATS count = COUNT(*) BY languages +FROM employees +| STATS count = COUNT(*) BY languages | SORT languages DESC // end::countAll[] ; @@ -371,7 +371,7 @@ FROM employees // tag::countAll-result[] count:long | languages:integer 10 |null -21 |5 +21 |5 18 |4 17 |3 19 |2 @@ -381,8 +381,8 @@ count:long | languages:integer basicGrok // tag::basicGrok[] -ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}""" +ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}""" | KEEP date, ip, email, num // end::basicGrok[] ; @@ -395,8 +395,8 @@ date:keyword | ip:keyword | email:keyword | num:keyword grokWithConversionSuffix // tag::grokWithConversionSuffix[] -ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" +ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num // end::grokWithConversionSuffix[] ; @@ -409,8 +409,8 @@ date:keyword | ip:keyword | email:keyword | num:integer grokWithToDatetime // tag::grokWithToDatetime[] -ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" -| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" +ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a """%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}""" | KEEP date, ip, email, num | EVAL date = TO_DATETIME(date) // end::grokWithToDatetime[] @@ -471,7 +471,7 @@ Tokyo | 100-7014 | null basicDissect // tag::basicDissect[] -ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" +ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" | DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip // end::basicDissect[] @@ -485,8 +485,8 @@ date:keyword | msg:keyword | ip:keyword dissectWithToDatetime // tag::dissectWithToDatetime[] -ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" -| DISSECT a """%{date} - %{msg} - %{ip}""" +ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" +| DISSECT a """%{date} - %{msg} - %{ip}""" | KEEP date, msg, ip | EVAL date = TO_DATETIME(date) // end::dissectWithToDatetime[] @@ -574,8 +574,8 @@ FROM employees // tag::like-result[] first_name:keyword | last_name:keyword -Ebbe |Callaway -Eberhardt |Terkki +Ebbe |Callaway +Eberhardt |Terkki // end::like-result[] ; @@ -589,7 +589,7 @@ FROM employees // tag::rlike-result[] first_name:keyword | last_name:keyword -Alejandro |McAlpine +Alejandro |McAlpine // end::rlike-result[] ; @@ -660,18 +660,19 @@ FROM sample_data docsBitLength required_capability: fn_bit_length // tag::bitLength[] -FROM employees -| 
KEEP first_name, last_name -| EVAL fn_bit_length = BIT_LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length=LENGTH(city), fn_bit_length = BIT_LENGTH(city) // end::bitLength[] -| SORT first_name +| SORT city | LIMIT 3 ; // tag::bitLength-result[] -first_name:keyword | last_name:keyword | fn_bit_length:integer -Alejandro |McAlpine |72 -Amabile |Gomatam |56 -Anneke |Preusig |48 +city:keyword | fn_length:integer | fn_bit_length:integer +Agwār | 5 | 48 +Ahmedabad | 9 | 72 +Bangalore | 9 | 72 // end::bitLength-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index 61a0ccd4af0c5..fc2350491db91 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -22,7 +22,7 @@ FROM addresses | SORT city.name ; -city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword +city.country.name:keyword | city.name:keyword | city.country.continent.planet.name:keyword Netherlands | Amsterdam | EARTH United States of America | San Francisco | EARTH Japan | Tokyo | EARTH @@ -138,39 +138,39 @@ a:integer | b:integer | c:integer | d:integer | e:integer multipleDuplicateInterleaved1 row a = 1 | eval b = a, c = 1, c = 3, d = b + 1, b = c * 2, c = 2, c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; multipleDuplicateInterleaved2 row a = 1 | eval b = a, c = 1 | eval c = 3, d = b + 1 | eval b = c * 2, c = 2 | eval c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; multipleDuplicateInterleaved3 row a = 1 | eval b = a, c = 1, c = 3 | eval d = b + 1 | eval b = c * 2, c = 2, c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; multipleDuplicateInterleaved4 row a = 1 | eval b = a | eval c = 1 | eval c = 3 | eval d = b + 1 | eval b = c * 2 | eval c = 2 | eval c = d * c + b | keep a, b, c, d; -a:integer | b:integer | c:integer | d:integer -1 | 6 | 10 | 2 +a:integer | b:integer | c:integer | d:integer +1 | 6 | 10 | 2 ; projectEval row x = 1 | keep x | eval a1 = x + 1, a2 = x + 1, a3 = a1 + a2, a1 = a1 + a2; -x:integer | a2:integer | a3:integer | a1:integer -1 | 2 | 4 | 4 +x:integer | a2:integer | a3:integer | a1:integer +1 | 2 | 4 | 4 ; evalNullSort @@ -195,76 +195,76 @@ Uri evalWithIsNullIsNotNull from employees | eval true_bool = null is null, false_bool = null is not null, negated_true = not(null is null), negated_false = not(null is not null) | sort emp_no | limit 1 | keep *true*, *false*, first_name, last_name; -true_bool:boolean | negated_true:boolean | false_bool:boolean | negated_false:boolean | first_name:keyword | last_name:keyword +true_bool:boolean | negated_true:boolean | false_bool:boolean | negated_false:boolean | first_name:keyword | last_name:keyword true | false | false | true | Georgi | Facello ; repetitiveEval -from employees | sort emp_no | keep emp_no | eval sum = emp_no + 1 -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | 
eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | 
eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no 
| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no -| limit 3 +from employees | sort emp_no | keep emp_no | eval sum = emp_no + 1 +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + 
emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + 
emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no | eval sum = sum + emp_no +| limit 3 ; emp_no:i | sum:i 10001 | 3230324 10002 | 3230647 10003 | 3230970 -; +; chainedEvalReusingPreviousValue from employees | sort emp_no | eval x1 = concat(first_name, "."), x2 = concat(x1, "."), x3 = concat(x2, ".") | keep x*, first_name | limit 5; - x1:keyword | x2:keyword | x3:keyword |first_name:keyword -Georgi. |Georgi.. |Georgi... |Georgi -Bezalel. |Bezalel.. |Bezalel... |Bezalel -Parto. |Parto.. |Parto... |Parto -Chirstian. |Chirstian.. |Chirstian... |Chirstian + x1:keyword | x2:keyword | x3:keyword |first_name:keyword +Georgi. |Georgi.. |Georgi... |Georgi +Bezalel. |Bezalel.. |Bezalel... |Bezalel +Parto. |Parto.. |Parto... |Parto +Chirstian. |Chirstian.. |Chirstian... |Chirstian Kyoichi. |Kyoichi.. |Kyoichi... |Kyoichi ; @@ -272,10 +272,10 @@ chainedEvalReusingPreviousValue2 from employees | sort emp_no | eval x1 = concat(first_name, "."), x2 = concat(x1, last_name), x3 = concat(x2, gender) | keep x*, first_name, gender | limit 5; x1:keyword | x2:keyword | x3:keyword |first_name:keyword|gender:keyword -Georgi. 
|Georgi.Facello |Georgi.FacelloM |Georgi |M -Bezalel. |Bezalel.Simmel |Bezalel.SimmelF |Bezalel |F -Parto. |Parto.Bamford |Parto.BamfordM |Parto |M -Chirstian. |Chirstian.Koblick|Chirstian.KoblickM|Chirstian |M +Georgi. |Georgi.Facello |Georgi.FacelloM |Georgi |M +Bezalel. |Bezalel.Simmel |Bezalel.SimmelF |Bezalel |F +Parto. |Parto.Bamford |Parto.BamfordM |Parto |M +Chirstian. |Chirstian.Koblick|Chirstian.KoblickM|Chirstian |M Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakM |Kyoichi |M ; @@ -283,10 +283,10 @@ chainedEvalReusingPreviousValue3 from employees | sort emp_no | eval x1 = concat(first_name, "."), x2 = concat(x1, last_name), x3 = concat(x2, x1) | keep x*, first_name | limit 5; x1:keyword | x2:keyword | x3:keyword |first_name:keyword -Georgi. |Georgi.Facello |Georgi.FacelloGeorgi. |Georgi -Bezalel. |Bezalel.Simmel |Bezalel.SimmelBezalel. |Bezalel -Parto. |Parto.Bamford |Parto.BamfordParto. |Parto -Chirstian. |Chirstian.Koblick|Chirstian.KoblickChirstian.|Chirstian +Georgi. |Georgi.Facello |Georgi.FacelloGeorgi. |Georgi +Bezalel. |Bezalel.Simmel |Bezalel.SimmelBezalel. |Bezalel +Parto. |Parto.Bamford |Parto.BamfordParto. |Parto +Chirstian. |Chirstian.Koblick|Chirstian.KoblickChirstian.|Chirstian Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakKyoichi. |Kyoichi ; @@ -301,7 +301,7 @@ warning:Line 1:88: java.lang.IllegalArgumentException: single-value function enc warning:Line 1:133: evaluation of [round([1.14], [1, 2])] failed, treating result as null. Only first 20 failures recorded. warning:Line 1:133: java.lang.IllegalArgumentException: single-value function encountered multi-value -a:double | b:double | c:double | d: double | e:double | f:double | g:double | h:double +a:double | b:double | c:double | d: double | e:double | f:double | g:double | h:double 1.2 | [2.4, 7.9] | 1.0 | null | 1.0 | null | 1.1 | null ; @@ -356,22 +356,43 @@ FROM sample_data docsLength // tag::length[] -FROM employees -| KEEP first_name, last_name -| EVAL fn_length = LENGTH(first_name) +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length = LENGTH(city) // end::length[] -| SORT first_name +| SORT city | LIMIT 3 ; // tag::length-result[] -first_name:keyword | last_name:keyword | fn_length:integer -Alejandro |McAlpine |9 -Amabile |Gomatam |7 -Anneke |Preusig |6 +city:keyword | fn_length:integer +Agwār | 5 +Ahmedabad | 9 +Bangalore | 9 // end::length-result[] ; +docsByteLength +required_capability: fn_byte_length +// tag::byteLength[] +FROM airports +| WHERE country == "India" +| KEEP city +| EVAL fn_length=LENGTH(city), fn_byte_length = BYTE_LENGTH(city) +// end::byteLength[] +| SORT city +| LIMIT 3 +; + +// tag::byteLength-result[] +city:keyword | fn_length:integer | fn_byte_length:integer +Agwār | 5 | 6 +Ahmedabad | 9 | 9 +Bangalore | 9 | 9 +// end::byteLength-result[] +; + docsGettingStartedEvalNoColumnName // tag::gs-eval-no-column-name[] FROM sample_data @@ -407,8 +428,8 @@ FROM employees // tag::eval-result[] first_name:keyword | last_name:keyword | height:double | height_feet:double | height_cm:double Georgi |Facello |2.03 |6.66043 |202.99999999999997 -Bezalel |Simmel |2.08 |6.82448 |208.0 -Parto |Bamford |1.83 |6.004230000000001 |183.0 +Bezalel |Simmel |2.08 |6.82448 |208.0 +Parto |Bamford |1.83 |6.004230000000001 |183.0 // end::eval-result[] ; @@ -423,9 +444,9 @@ FROM employees // tag::evalReplace-result[] first_name:keyword | last_name:keyword | height:double -Georgi |Facello |6.66043 -Bezalel |Simmel |6.82448 -Parto |Bamford |6.004230000000001 +Georgi |Facello |6.66043 +Bezalel 
|Simmel |6.82448 +Parto |Bamford |6.004230000000001 // end::evalReplace-result[] ; @@ -440,8 +461,8 @@ FROM employees // tag::evalUnnamedColumn-result[] first_name:keyword | last_name:keyword | height:double | height * 3.281:double -Georgi |Facello |2.03 |6.66043 -Bezalel |Simmel |2.08 |6.82448 +Georgi |Facello |2.03 |6.66043 +Bezalel |Simmel |2.08 |6.82448 Parto |Bamford |1.83 |6.004230000000001 // end::evalUnnamedColumn-result[] ; @@ -524,16 +545,16 @@ FROM employees | KEEP emp_no, salary, sum ; - emp_no:i | salary:i | sum:i --10015 |25324 |35339 --10035 |25945 |35980 --10092 |25976 |36068 --10048 |26436 |36484 --10057 |27215 |37272 --10084 |28035 |38119 --10026 |28336 |38362 --10068 |28941 |39009 --10060 |29175 |39235 + emp_no:i | salary:i | sum:i +-10015 |25324 |35339 +-10035 |25945 |35980 +-10092 |25976 |36068 +-10048 |26436 |36484 +-10057 |27215 |37272 +-10084 |28035 |38119 +-10026 |28336 |38362 +-10068 |28941 |39009 +-10060 |29175 |39235 -10042 |30404 |40446 ; @@ -545,16 +566,16 @@ from employees | limit 10 ; - first_name:keyword | last_name:keyword | salary:integer|ll:keyword|lf:keyword -Mona |Azuma |46595 |A |M -Satosi |Awdeh |50249 |A |S -Brendon |Bernini |33370 |B |B -Breannda |Billingsley |29175 |B |B -Cristinel |Bouloucos |58715 |B |C -Charlene |Brattka |28941 |B |C -Margareta |Bierman |41933 |B |M -Mokhtar |Bernatsky |38992 |B |M -Parto |Bamford |61805 |B |P + first_name:keyword | last_name:keyword | salary:integer|ll:keyword|lf:keyword +Mona |Azuma |46595 |A |M +Satosi |Awdeh |50249 |A |S +Brendon |Bernini |33370 |B |B +Breannda |Billingsley |29175 |B |B +Cristinel |Bouloucos |58715 |B |C +Charlene |Brattka |28941 |B |C +Margareta |Bierman |41933 |B |M +Mokhtar |Bernatsky |38992 |B |M +Parto |Bamford |61805 |B |P Premal |Baek |52833 |B |P ; @@ -568,15 +589,15 @@ from employees | limit 10 ; - fn:keyword | ln:keyword | salary:integer| c:keyword -Mona |Azuma |46595 |AM -Satosi |Awdeh |50249 |AS -Brendon |Bernini |33370 |BB -Breannda |Billingsley |29175 |BB -Cristinel |Bouloucos |58715 |BC -Charlene |Brattka |28941 |BC -Margareta |Bierman |41933 |BM -Mokhtar |Bernatsky |38992 |BM -Parto |Bamford |61805 |BP + fn:keyword | ln:keyword | salary:integer| c:keyword +Mona |Azuma |46595 |AM +Satosi |Awdeh |50249 |AS +Brendon |Bernini |33370 |BB +Breannda |Billingsley |29175 |BB +Cristinel |Bouloucos |58715 |BC +Charlene |Brattka |28941 |BC +Margareta |Bierman |41933 |BM +Mokhtar |Bernatsky |38992 |BM +Parto |Bamford |61805 |BP Premal |Baek |52833 |BP ; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthEvaluator.java new file mode 100644 index 0000000000000..1b0bff92d7d04 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthEvaluator.java @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
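An aside before the generated body: the evaluator that follows applies `ByteLength.process` position by position under a fixed null and multi-value contract. A compact Java sketch of that contract (illustrative names, not ES internals; `BytesRef.length` already counts UTF-8 bytes, so no decoding is needed):

import org.apache.lucene.util.BytesRef;

class ByteLengthContractSketch {
    // Mirrors the generated eval(int, BytesRefBlock) loop below: a null position
    // yields null; a multi-valued position registers the warning
    // "single-value function encountered multi-value" and also yields null;
    // a single value v yields v.length.
    static Integer byteLengthAt(BytesRef[] valuesAtPosition) {
        if (valuesAtPosition == null || valuesAtPosition.length == 0) {
            return null;
        }
        if (valuesAtPosition.length > 1) {
            return null; // the real evaluator appends null after recording the warning
        }
        return valuesAtPosition[0].length;
    }
}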
+package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ByteLength}. + * This class is generated. Do not edit it. + */ +public final class ByteLengthEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final EvalOperator.ExpressionEvaluator val; + + private final DriverContext driverContext; + + private Warnings warnings; + + public ByteLengthEvaluator(Source source, EvalOperator.ExpressionEvaluator val, + DriverContext driverContext) { + this.source = source; + this.val = val; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) { + BytesRefVector valVector = valBlock.asVector(); + if (valVector == null) { + return eval(page.getPositionCount(), valBlock); + } + return eval(page.getPositionCount(), valVector).asBlock(); + } + } + + public IntBlock eval(int positionCount, BytesRefBlock valBlock) { + try(IntBlock.Builder result = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (valBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (valBlock.getValueCount(p) != 1) { + if (valBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendInt(ByteLength.process(valBlock.getBytesRef(valBlock.getFirstValueIndex(p), valScratch))); + } + return result.build(); + } + } + + public IntVector eval(int positionCount, BytesRefVector valVector) { + try(IntVector.FixedBuilder result = driverContext.blockFactory().newIntVectorFixedBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendInt(p, ByteLength.process(valVector.getBytesRef(p, valScratch))); + } + return result.build(); + } + } + + @Override + public String toString() { + return "ByteLengthEvaluator[" + "val=" + val + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(val); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory val; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val) { + this.source = source; + this.val = val; + } + + 
@Override + public ByteLengthEvaluator get(DriverContext context) { + return new ByteLengthEvaluator(source, val.get(context), context); + } + + @Override + public String toString() { + return "ByteLengthEvaluator[" + "val=" + val + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index b0111485adbe7..0d6af0ec3bbb1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -33,6 +33,11 @@ public enum Cap { */ FN_BIT_LENGTH, + /** + * Support for function {@code BYTE_LENGTH}. + */ + FN_BYTE_LENGTH, + /** * Support for function {@code REVERSE}. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 7a6ff79d79a65..d1aef0e46caca 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -118,6 +118,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; import org.elasticsearch.xpack.esql.expression.function.scalar.string.BitLength; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.EndsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; @@ -308,6 +309,7 @@ private FunctionDefinition[][] functions() { // string new FunctionDefinition[] { def(BitLength.class, BitLength::new, "bit_length"), + def(ByteLength.class, ByteLength::new, "byte_length"), def(Concat.class, Concat::new, "concat"), def(EndsWith.class, EndsWith::new, "ends_with"), def(LTrim.class, LTrim::new, "ltrim"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index e9ca69055658d..610fe1c5ea000 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -55,6 +55,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.AbstractMultivalueFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StX; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.StY; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; @@ -80,6 +81,7 @@ public static List<NamedWriteableRegistry.Entry> getNamedWriteables() { entries.add(Acos.ENTRY); entries.add(Asin.ENTRY); entries.add(Atan.ENTRY); + entries.add(ByteLength.ENTRY);
entries.add(Cbrt.ENTRY); entries.add(Ceil.ENTRY); entries.add(Cos.ENTRY); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java index 5deb6fa7feba6..ad8b46df29df2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/BitLength.java @@ -40,6 +40,7 @@ public class BitLength extends UnaryScalarFunction { @FunctionInfo( returnType = "integer", description = "Returns the bit length of a string.", + note = "All strings are in UTF-8, so a single character can use multiple bytes.", examples = @Example(file = "docs", tag = "bitLength") ) public BitLength( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLength.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLength.java new file mode 100644 index 0000000000000..f967b20b8be32 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLength.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.NodeInfo; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.Example; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isString; + +public class ByteLength extends UnaryScalarFunction { + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( + Expression.class, + "ByteLength", + ByteLength::new + ); + + @FunctionInfo( + returnType = "integer", + description = "Returns the byte length of a string.", + note = "All strings are in UTF-8, so a single character can use multiple bytes.", + examples = @Example(file = "eval", tag = "byteLength") + ) + public ByteLength( + Source source, + @Param( + name = "string", + type = { "keyword", "text" }, + description = "String expression. If `null`, the function returns `null`." 
+ ) Expression field + ) { + super(source, field); + } + + private ByteLength(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ENTRY.name; + } + + @Override + public DataType dataType() { + return DataType.INTEGER; + } + + @Override + protected TypeResolution resolveType() { + return childrenResolved() ? isString(field(), sourceText(), DEFAULT) : new TypeResolution("Unresolved children"); + } + + @Evaluator + static int process(BytesRef val) { + return val.length; + } + + @Override + public Expression replaceChildren(List<Expression> newChildren) { + return new ByteLength(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo<ByteLength> info() { + return NodeInfo.create(this, ByteLength::new, field()); + } + + @Override + public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { + return new ByteLengthEvaluator.Factory(source(), toEvaluator.apply(field())); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java index f4bb7f35cb466..3b442a8583a0a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Length.java @@ -34,6 +34,7 @@ public class Length extends UnaryScalarFunction { @FunctionInfo( returnType = "integer", description = "Returns the character length of a string.", + note = "All strings are in UTF-8, so a single character can use multiple bytes.", examples = @Example(file = "eval", tag = "length") ) public Length( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthSerializationTests.java new file mode 100644 index 0000000000000..98b5268797c8c --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthSerializationTests.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.AbstractUnaryScalarSerializationTests; + +public class ByteLengthSerializationTests extends AbstractUnaryScalarSerializationTests<ByteLength> { + @Override + protected ByteLength create(Source source, Expression child) { + return new ByteLength(source, child); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthTests.java new file mode 100644 index 0000000000000..866b8e0cd8da3 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ByteLengthTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.core.expression.Expression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class ByteLengthTests extends AbstractScalarFunctionTestCase { + public ByteLengthTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() { + List<TestCaseSupplier> cases = new ArrayList<>(); + cases.addAll(List.of(new TestCaseSupplier("byte length basic test", List.of(DataType.KEYWORD), () -> { + var s = randomAlphaOfLength(between(0, 10000)); + return testCase(s, DataType.KEYWORD, s.length()); + }))); + cases.addAll(makeTestCases("empty string", () -> "", 0)); + cases.addAll(makeTestCases("single ascii character", () -> "a", 1)); + cases.addAll(makeTestCases("ascii string", () -> "clump", 5)); + cases.addAll(makeTestCases("3 bytes, 1 code point", () -> "☕", 3)); + cases.addAll(makeTestCases("6 bytes, 2 code points", () -> "❗️", 6)); + cases.addAll(makeTestCases("100 random alpha", () -> randomAlphaOfLength(100), 100)); + return parameterSuppliersFromTypedDataWithDefaultChecks(ENTIRELY_NULL_PRESERVES_TYPE, cases, (v, p) -> "string"); + } + + private static List<TestCaseSupplier> makeTestCases(String title, Supplier<String> text, int expectedByteLength) { + return Stream.of(DataType.KEYWORD, DataType.TEXT, DataType.SEMANTIC_TEXT) + .map( + dataType -> new TestCaseSupplier( + title + " with " + dataType, + List.of(dataType), + () -> testCase(text.get(), dataType, expectedByteLength) + ) + ) + .toList(); + } + + @Override + protected Expression build(Source source, List<Expression> args) { + assert args.size() == 1; + return new ByteLength(source, args.get(0)); + } + + private static TestCaseSupplier.TestCase testCase(String s, DataType dataType, int expectedByteLength) { + var bytesRef = new BytesRef(s); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(bytesRef, dataType, "f")), + "ByteLengthEvaluator[val=Attribute[channel=0]]", + DataType.INTEGER, + equalTo(expectedByteLength) + ); + } + + private static final boolean ENTIRELY_NULL_PRESERVES_TYPE = true; +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 0e09809d16902..5a7547d011c0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; import
org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ByteLength; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; @@ -115,6 +116,7 @@ public static List<Object[]> params() { new Pow(Source.EMPTY, DOUBLE1, DOUBLE2), DOUBLE1, literal, + new ByteLength(Source.EMPTY, literal), new Length(Source.EMPTY, literal), new DateFormat(Source.EMPTY, datePattern, DATE, TEST_CONFIG), new DateFormat(Source.EMPTY, datePattern, literal, TEST_CONFIG), diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index 6e7098da33805..4c3b16c5dc309 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -30,7 +30,7 @@ setup: - method: POST path: /_query parameters: [] - capabilities: [ snapshot_test_for_telemetry, fn_bit_length ] + capabilities: [ snapshot_test_for_telemetry, fn_byte_length ] reason: "Test that should only be executed on snapshot versions" - do: {xpack.usage: {}} @@ -91,7 +91,8 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} - - length: {esql.functions: 119} # check the "sister" test below for a likely update to the same esql.functions length check + # Testing for the entire function set isn't feasible, so we just check that we return the correct count as an approximation. + - length: {esql.functions: 120} # check the "sister" test below for a likely update to the same esql.functions length check --- "Basic ESQL usage output (telemetry) non-snapshot version": @@ -101,7 +102,7 @@ setup: - method: POST path: /_query parameters: [] - capabilities: [ non_snapshot_test_for_telemetry, fn_bit_length ] + capabilities: [ non_snapshot_test_for_telemetry, fn_byte_length ] reason: "Test that should only be executed on release versions" - do: {xpack.usage: {}} @@ -162,4 +163,4 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} - - length: {esql.functions: 116} # check the "sister" test above for a likely update to the same esql.functions length check + - length: {esql.functions: 117} # check the "sister" test above for a likely update to the same esql.functions length check From 5204902c4dd60f9364d43441f987f04ecd3a2f2e Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 13 Nov 2024 13:57:24 +1100 Subject: [PATCH 70/95] [Test] Enable logging for AmazonHttpClient (#116560) If sending a request fails locally without reaching the server, the retryable exception is logged differently. This PR enables the logging for this scenario.
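For context on the mechanism: `@TestIssueLogging` raises logger verbosity only for the annotated test method, so the extra output is captured while an issue is investigated without turning it on suite-wide. A sketch of the pattern this patch applies (the class and method names here are hypothetical; the annotation attributes and logger names are the ones used in the diff below):

import org.elasticsearch.test.junit.annotations.TestIssueLogging;

public class SomeS3IntegrationTests {
    @TestIssueLogging(
        issueUrl = "https://github.com/elastic/elasticsearch/issues/88841",
        value = "com.amazonaws.request:DEBUG,com.amazonaws.http.AmazonHttpClient:TRACE"
    )
    public void testSomethingFlaky() throws Exception {
        // AmazonHttpClient TRACE now also captures retries that fail before the
        // request reaches the server, which com.amazonaws.request DEBUG alone misses.
    }
}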
Relates: #88841 Relates: #101608 --- .../repositories/s3/S3BlobStoreRepositoryTests.java | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 6b4dd5ed86e2d..bb8a452e21771 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -188,7 +188,10 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { } @Override - @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/88841", value = "com.amazonaws.request:DEBUG") + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/88841", + value = "com.amazonaws.request:DEBUG,com.amazonaws.http.AmazonHttpClient:TRACE" + ) public void testRequestStats() throws Exception { super.testRequestStats(); } @@ -234,7 +237,10 @@ public void testAbortRequestStats() throws Exception { assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts); } - @TestIssueLogging(issueUrl = "https://github.com/elastic/elasticsearch/issues/101608", value = "com.amazonaws.request:DEBUG") + @TestIssueLogging( + issueUrl = "https://github.com/elastic/elasticsearch/issues/101608", + value = "com.amazonaws.request:DEBUG,com.amazonaws.http.AmazonHttpClient:TRACE" + ) public void testMetrics() throws Exception { // Create the repository and perform some activities final String repository = createRepository(randomRepositoryName(), false); From 5b25dee334e81c7f706367375010198a3c80d68b Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 13 Nov 2024 10:21:37 +0200 Subject: [PATCH 71/95] Propagating nested inner_hits to the parent compound retriever (#116408) --- docs/changelog/116408.yaml | 6 + .../search/nested/SimpleNestedIT.java | 60 ++++++++++ .../org/elasticsearch/TransportVersions.java | 2 +- .../query}/RankDocsQueryBuilder.java | 19 ++- .../action/search/SearchCapabilities.java | 3 + .../elasticsearch/search/SearchModule.java | 2 +- .../elasticsearch/search/SearchService.java | 8 +- .../search/builder/SearchSourceBuilder.java | 29 ++++- .../retriever/CompoundRetrieverBuilder.java | 7 +- .../search/retriever/KnnRetrieverBuilder.java | 2 +- .../retriever/RankDocsRetrieverBuilder.java | 2 +- .../retriever/rankdoc/RankDocsQuery.java | 2 +- .../query}/RankDocsQueryBuilderTests.java | 5 +- ...bstractRankDocWireSerializingTestCase.java | 2 +- .../KnnRetrieverBuilderParsingTests.java | 2 +- .../RankDocsRetrieverBuilderTests.java | 2 +- .../retriever/QueryRuleRetrieverBuilder.java | 12 +- .../TextSimilarityRankRetrieverBuilder.java | 14 +-- ...rrf_retriever_search_api_compatibility.yml | 111 ++++++++++++++++++ 19 files changed, 248 insertions(+), 42 deletions(-) create mode 100644 docs/changelog/116408.yaml rename server/src/main/java/org/elasticsearch/{search/retriever/rankdoc => index/query}/RankDocsQueryBuilder.java (91%) rename server/src/test/java/org/elasticsearch/{search/retriever/rankdoc => index/query}/RankDocsQueryBuilderTests.java (98%) diff --git a/docs/changelog/116408.yaml b/docs/changelog/116408.yaml new file mode 100644 index 0000000000000..5f4c8459778a6 --- /dev/null +++ b/docs/changelog/116408.yaml @@ -0,0 +1,6 @@ +pr: 116408 +summary: 
Propagating nested `inner_hits` to the parent compound retriever +area: Ranking +type: bug +issues: + - 116397 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java index 2fde645f0036b..4688201c66201 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/SimpleNestedIT.java @@ -21,7 +21,9 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortMode; @@ -1581,6 +1583,64 @@ public void testCheckFixedBitSetCache() throws Exception { assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0L)); } + public void testSkipNestedInnerHits() throws Exception { + assertAcked(prepareCreate("test").setMapping("nested1", "type=nested")); + ensureGreen(); + + prepareIndex("test").setId("1") + .setSource( + jsonBuilder().startObject() + .field("field1", "value1") + .startArray("nested1") + .startObject() + .field("n_field1", "foo") + .field("n_field2", "bar") + .endObject() + .endArray() + .endObject() + ) + .get(); + + waitForRelocation(ClusterHealthStatus.GREEN); + GetResponse getResponse = client().prepareGet("test", "1").get(); + assertThat(getResponse.isExists(), equalTo(true)); + assertThat(getResponse.getSourceAsBytesRef(), notNullValue()); + refresh(); + + assertNoFailuresAndResponse( + prepareSearch("test").setSource( + new SearchSourceBuilder().query( + QueryBuilders.nestedQuery("nested1", QueryBuilders.termQuery("nested1.n_field1", "foo"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder()) + ) + ), + res -> { + assertNotNull(res.getHits()); + assertHitCount(res, 1); + assertThat(res.getHits().getHits().length, equalTo(1)); + // by default we should get inner hits + assertNotNull(res.getHits().getHits()[0].getInnerHits()); + assertNotNull(res.getHits().getHits()[0].getInnerHits().get("nested1")); + } + ); + + assertNoFailuresAndResponse( + prepareSearch("test").setSource( + new SearchSourceBuilder().query( + QueryBuilders.nestedQuery("nested1", QueryBuilders.termQuery("nested1.n_field1", "foo"), ScoreMode.Avg) + .innerHit(new InnerHitBuilder()) + ).skipInnerHits(true) + ), + res -> { + assertNotNull(res.getHits()); + assertHitCount(res, 1); + assertThat(res.getHits().getHits().length, equalTo(1)); + // if we explicitly say to ignore inner hits, then this should now be null + assertNull(res.getHits().getHits()[0].getInnerHits()); + } + ); + } + private void assertDocumentCount(String index, long numdocs) { IndicesStatsResponse stats = indicesAdmin().prepareStats(index).clear().setDocs(true).get(); assertNoFailures(stats); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6e62845383a14..3815d1bba18c3 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -194,7 +194,7 @@ static TransportVersion def(int id) { 
public static final TransportVersion DATA_STREAM_INDEX_VERSION_DEPRECATION_CHECK = def(8_788_00_0); public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0); public static final TransportVersion VERTEX_AI_INPUT_TYPE_ADDED = def(8_790_00_0); - + public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE = def(8_791_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java similarity index 91% rename from server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java rename to server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java index 1539be9a46ab9..33077697a2ce6 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java @@ -7,7 +7,7 @@ * License v3.0 only", or the "Server Side Public License, v 1". */ -package org.elasticsearch.search.retriever.rankdoc; +package org.elasticsearch.index.query; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; @@ -16,15 +16,13 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.query.AbstractQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQuery; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.TransportVersions.RRF_QUERY_REWRITE; @@ -55,6 +53,15 @@ public RankDocsQueryBuilder(StreamInput in) throws IOException { } } + @Override + protected void extractInnerHitBuilders(Map innerHits) { + if (queryBuilders != null) { + for (QueryBuilder query : queryBuilders) { + InnerHitContextBuilder.extractInnerHits(query, innerHits); + } + } + } + @Override protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { if (queryBuilders != null) { @@ -71,7 +78,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws return super.doRewrite(queryRewriteContext); } - RankDoc[] rankDocs() { + public RankDoc[] rankDocs() { return rankDocs; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java index 338dabb23ab4f..3bc1c467323a3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/SearchCapabilities.java @@ -36,6 +36,8 @@ private SearchCapabilities() {} private static final String KQL_QUERY_SUPPORTED = "kql_query"; /** Support multi-dense-vector field mapper. */ private static final String MULTI_DENSE_VECTOR_FIELD_MAPPER = "multi_dense_vector_field_mapper"; + /** Support propagating nested retrievers' inner_hits to top-level compound retrievers . 
*/ + private static final String NESTED_RETRIEVER_INNER_HITS_SUPPORT = "nested_retriever_inner_hits_support"; public static final Set CAPABILITIES; static { @@ -45,6 +47,7 @@ private SearchCapabilities() {} capabilities.add(BYTE_FLOAT_BIT_DOT_PRODUCT_CAPABILITY); capabilities.add(DENSE_VECTOR_DOCVALUE_FIELDS); capabilities.add(TRANSFORM_RANK_RRF_TO_RETRIEVER); + capabilities.add(NESTED_RETRIEVER_INNER_HITS_SUPPORT); if (MultiDenseVectorFieldMapper.FEATURE_FLAG.isEnabled()) { capabilities.add(MULTI_DENSE_VECTOR_FIELD_MAPPER); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 7a8b4e0cfe95a..b8f50c6f9a62f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.index.query.RegexpQueryBuilder; import org.elasticsearch.index.query.ScriptQueryBuilder; import org.elasticsearch.index.query.SimpleQueryStringBuilder; @@ -238,7 +239,6 @@ import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; import org.elasticsearch.search.retriever.StandardRetrieverBuilder; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.GeoDistanceSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index be96b4e25d841..a11c4013a9c9b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1285,13 +1285,17 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc ); if (query != null) { QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(query, innerHitsRewriteContext, true); - InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + if (false == source.skipInnerHits()) { + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + } searchExecutionContext.setAliasFilter(context.request().getAliasFilter().getQueryBuilder()); context.parsedQuery(searchExecutionContext.toQuery(query)); } if (source.postFilter() != null) { QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(source.postFilter(), innerHitsRewriteContext, true); - InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + if (false == source.skipInnerHits()) { + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); + } context.parsedPostFilter(searchExecutionContext.toQuery(source.postFilter())); } if (innerHitBuilders.size() > 0) { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index cb5e841a3df77..699c39a652f15 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -214,6 +214,8 @@ public static 
HighlightBuilder highlight() { private Map runtimeMappings = emptyMap(); + private boolean skipInnerHits = false; + /** * Constructs a new search source builder. */ @@ -290,6 +292,11 @@ public SearchSourceBuilder(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { rankBuilder = in.readOptionalNamedWriteable(RankBuilder.class); } + if (in.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { + skipInnerHits = in.readBoolean(); + } else { + skipInnerHits = false; + } } @Override @@ -379,6 +386,9 @@ public void writeTo(StreamOutput out) throws IOException { } else if (rankBuilder != null) { throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion().toReleaseVersion() + "]"); } + if (out.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { + out.writeBoolean(skipInnerHits); + } } /** @@ -1280,6 +1290,7 @@ private SearchSourceBuilder shallowCopy( rewrittenBuilder.collapse = collapse; rewrittenBuilder.pointInTimeBuilder = pointInTimeBuilder; rewrittenBuilder.runtimeMappings = runtimeMappings; + rewrittenBuilder.skipInnerHits = skipInnerHits; return rewrittenBuilder; } @@ -1838,6 +1849,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t if (false == runtimeMappings.isEmpty()) { builder.field(RUNTIME_MAPPINGS_FIELD.getPreferredName(), runtimeMappings); } + if (skipInnerHits) { + builder.field("skipInnerHits", true); + } return builder; } @@ -1850,6 +1864,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public SearchSourceBuilder skipInnerHits(boolean skipInnerHits) { + this.skipInnerHits = skipInnerHits; + return this; + } + + public boolean skipInnerHits() { + return this.skipInnerHits; + } + public static class IndexBoost implements Writeable, ToXContentObject { private final String index; private final float boost; @@ -2104,7 +2127,8 @@ public int hashCode() { collapse, trackTotalHitsUpTo, pointInTimeBuilder, - runtimeMappings + runtimeMappings, + skipInnerHits ); } @@ -2149,7 +2173,8 @@ public boolean equals(Object obj) { && Objects.equals(collapse, other.collapse) && Objects.equals(trackTotalHitsUpTo, other.trackTotalHitsUpTo) && Objects.equals(pointInTimeBuilder, other.pointInTimeBuilder) - && Objects.equals(runtimeMappings, other.runtimeMappings); + && Objects.equals(runtimeMappings, other.runtimeMappings) + && Objects.equals(skipInnerHits, other.skipInnerHits); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index b15798db95b6f..db839de9f573a 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -236,7 +236,7 @@ public int doHashCode() { return Objects.hash(innerRetrievers); } - protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { + protected final SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit) .trackTotalHits(false) .storedFields(new StoredFieldsContext(false)) @@ -254,6 +254,11 @@ protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, } 
sortBuilders.add(new FieldSortBuilder(FieldSortBuilder.SHARD_DOC_FIELD_NAME)); sourceBuilder.sort(sortBuilders); + sourceBuilder.skipInnerHits(true); + return finalizeSourceBuilder(sourceBuilder); + } + + protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder sourceBuilder) { return sourceBuilder; } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java index facda1a30a5ac..8be9a78dae154 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -15,8 +15,8 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java index 535db5c8fe28e..02f890f51d011 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java @@ -12,9 +12,9 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java index 2cb960e7e73cb..ebbdf58cc8c4f 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQuery.java @@ -283,7 +283,7 @@ private static int[] findSegmentStarts(IndexReader reader, RankDoc[] docs) { return starts; } - RankDoc[] rankDocs() { + public RankDoc[] rankDocs() { return docs; } diff --git a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RankDocsQueryBuilderTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java rename to server/src/test/java/org/elasticsearch/index/query/RankDocsQueryBuilderTests.java index e8f88f3297b78..ba39702d3d162 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/rankdoc/RankDocsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RankDocsQueryBuilderTests.java @@ -7,7 +7,7 @@ * License v3.0 only", or the "Server Side Public License, v 1". 
*/ -package org.elasticsearch.search.retriever.rankdoc; +package org.elasticsearch.index.query; import org.apache.lucene.document.Document; import org.apache.lucene.document.NumericDocValuesField; @@ -22,9 +22,8 @@ import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.rank.RankDoc; +import org.elasticsearch.search.retriever.rankdoc.RankDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java b/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java index d0c85a33acf09..8cc40570ab4bb 100644 --- a/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/rank/AbstractRankDocWireSerializingTestCase.java @@ -12,8 +12,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java index b0bf7e6636498..7923cb5f0d918 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/KnnRetrieverBuilderParsingTests.java @@ -17,11 +17,11 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.usage.SearchUsage; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java index 384564ac01e2a..af6782c45dce8 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java @@ -13,11 +13,11 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.RandomQueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import 
org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.test.ESTestCase; import java.io.IOException; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java index 9ef2f630b50bd..54a89d061de35 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java @@ -11,15 +11,14 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RankDocsQueryBuilder; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilderWrapper; import org.elasticsearch.search.retriever.RetrieverParserContext; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -129,11 +128,10 @@ public int rankWindowSize() { } @Override - protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { - var ret = super.createSearchSourceBuilder(pit, retrieverBuilder); - checkValidSort(ret.sorts()); - ret.query(new RuleQueryBuilder(ret.query(), matchCriteria, rulesetIds)); - return ret; + protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder source) { + checkValidSort(source.sorts()); + source.query(new RuleQueryBuilder(source.query(), matchCriteria, rulesetIds)); + return source; } private static void checkValidSort(List> sortBuilders) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index 91b6cdc61afe4..c239319b6283a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -12,9 +12,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; @@ -157,17 +155,7 @@ protected RankDoc[] 
combineInnerRetrieverResults(List rankResults) { } @Override - protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { - var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit) - .trackTotalHits(false) - .storedFields(new StoredFieldsContext(false)) - .size(rankWindowSize); - // apply the pre-filters downstream once - if (preFilterQueryBuilders.isEmpty() == false) { - retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); - } - retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, true); - + protected SearchSourceBuilder finalizeSourceBuilder(SearchSourceBuilder sourceBuilder) { sourceBuilder.rankBuilder( new TextSimilarityRankBuilder(this.field, this.inferenceId, this.inferenceText, this.rankWindowSize, this.minScore) ); diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml index f3914843b80ec..42c01f0b9636c 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml @@ -35,6 +35,16 @@ setup: properties: views: type: long + nested_inner_hits: + type: nested + properties: + data: + type: keyword + paragraph_id: + type: dense_vector + dims: 1 + index: true + similarity: l2_norm - do: index: @@ -125,6 +135,16 @@ setup: integer: 2 keyword: "technology" nested: { views: 10} + nested_inner_hits: [{"data": "foo"}, {"data": "bar"}, {"data": "baz"}] + + - do: + index: + index: test + id: "10" + body: + id: 10 + integer: 3 + nested_inner_hits: [ {"data": "foo", "paragraph_id": [1]}] - do: indices.refresh: {} @@ -960,3 +980,94 @@ setup: - length: { hits.hits : 1 } - match: { hits.hits.0._id: "1" } + +--- +"rrf retriever with inner_hits for sub-retriever": + - requires: + capabilities: + - method: POST + path: /_search + capabilities: [ nested_retriever_inner_hits_support ] + test_runner_features: capabilities + reason: "Support for propagating nested retrievers' inner hits to the top-level compound retriever is required" + + - do: + search: + _source: false + index: test + body: + retriever: + rrf: + retrievers: [ + { + # this will return doc 9 and doc 10 + standard: { + query: { + nested: { + path: nested_inner_hits, + inner_hits: { + name: nested_data_field, + _source: false, + "sort": [ { + "nested_inner_hits.data": "asc" + } + ], + fields: [ nested_inner_hits.data ] + }, + query: { + match_all: { } + } + } + } + } + }, + { + # this will return doc 10 + standard: { + query: { + nested: { + path: nested_inner_hits, + inner_hits: { + name: nested_vector_field, + _source: false, + size: 1, + "fields": [ "nested_inner_hits.paragraph_id" ] + }, + query: { + knn: { + field: nested_inner_hits.paragraph_id, + query_vector: [ 1 ], + num_candidates: 10 + } + } + } + } + } + }, + { + standard: { + query: { + match_all: { } + } + } + } + ] + rank_window_size: 10 + rank_constant: 10 + size: 3 + + - match: { hits.total.value: 10 } + + - match: { hits.hits.0.inner_hits.nested_data_field.hits.total.value: 1 } + - match: { hits.hits.0.inner_hits.nested_data_field.hits.hits.0.fields.nested_inner_hits.0.data.0: foo } + - match: { hits.hits.0.inner_hits.nested_vector_field.hits.total.value: 1 } + - match: { 
hits.hits.0.inner_hits.nested_vector_field.hits.hits.0.fields.nested_inner_hits.0.paragraph_id: [ 1 ] } + + - match: { hits.hits.1.inner_hits.nested_data_field.hits.total.value: 3 } + - match: { hits.hits.1.inner_hits.nested_data_field.hits.hits.0.fields.nested_inner_hits.0.data.0: bar } + - match: { hits.hits.1.inner_hits.nested_data_field.hits.hits.1.fields.nested_inner_hits.0.data.0: baz } + - match: { hits.hits.1.inner_hits.nested_data_field.hits.hits.2.fields.nested_inner_hits.0.data.0: foo } + - match: { hits.hits.1.inner_hits.nested_vector_field.hits.total.value: 0 } + + - match: { hits.hits.2.inner_hits.nested_data_field.hits.total.value: 0 } + - match: { hits.hits.2.inner_hits.nested_vector_field.hits.total.value: 0 } From d702919fdb092126fdeddd877b8eeae0a3c37bb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Wed, 13 Nov 2024 09:36:40 +0100 Subject: [PATCH 72/95] [Entitlements] External IT test for checkSystemExit (#116435) --- libs/entitlement/bridge/build.gradle | 11 +++- qa/entitlements/build.gradle | 42 +++++++++++++++ .../test/entitlements/EntitlementsIT.java | 52 +++++++++++++++++++ .../src/main/java/module-info.java | 5 ++ .../entitlements/EntitlementsCheckPlugin.java | 47 +++++++++++++++++ ...RestEntitlementsCheckSystemExitAction.java | 46 ++++++++++++++++ .../bootstrap/Elasticsearch.java | 2 + 7 files changed, 204 insertions(+), 1 deletion(-) create mode 100644 qa/entitlements/build.gradle create mode 100644 qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java create mode 100644 qa/entitlements/src/main/java/module-info.java create mode 100644 qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java create mode 100644 qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java diff --git a/libs/entitlement/bridge/build.gradle b/libs/entitlement/bridge/build.gradle index dff5fac1e1c1f..3d59dd3eaf33e 100644 --- a/libs/entitlement/bridge/build.gradle +++ b/libs/entitlement/bridge/build.gradle @@ -9,8 +9,17 @@ apply plugin: 'elasticsearch.build' +configurations { + bridgeJar { + canBeConsumed = true + canBeResolved = false + } +} + +artifacts { + bridgeJar(jar) +} tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' } - diff --git a/qa/entitlements/build.gradle b/qa/entitlements/build.gradle new file mode 100644 index 0000000000000..2621d2731f411 --- /dev/null +++ b/qa/entitlements/build.gradle @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +apply plugin: 'elasticsearch.base-internal-es-plugin' +apply plugin: 'elasticsearch.internal-java-rest-test' +// Necessary to use tests in Serverless +apply plugin: 'elasticsearch.internal-test-artifact' + +esplugin { + name 'entitlement-qa' + description 'A test module that triggers entitlement checks' + classname 'org.elasticsearch.test.entitlements.EntitlementsCheckPlugin' +} + +configurations { + entitlementBridge { + canBeConsumed = false + } +} + +dependencies { + clusterPlugins project(':qa:entitlements') + entitlementBridge project(':libs:entitlement:bridge') +} + +tasks.named('javaRestTest') { + systemProperty "tests.entitlement-bridge.jar-name", configurations.entitlementBridge.singleFile.getName() + usesDefaultDistribution() + systemProperty "tests.security.manager", "false" +} + +tasks.named("javadoc").configure { + // There seems to be some problem generating javadoc on a QA project that has a module definition + enabled = false +} + diff --git a/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java b/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java new file mode 100644 index 0000000000000..a62add89c51e6 --- /dev/null +++ b/qa/entitlements/src/javaRestTest/java/org/elasticsearch/test/entitlements/EntitlementsIT.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.test.entitlements; + +import org.elasticsearch.client.Request; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +@ESTestCase.WithoutSecurityManager +public class EntitlementsIT extends ESRestTestCase { + + private static final String ENTITLEMENT_BRIDGE_JAR_NAME = System.getProperty("tests.entitlement-bridge.jar-name"); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.INTEG_TEST) + .plugin("entitlement-qa") + .systemProperty("es.entitlements.enabled", "true") + .setting("xpack.security.enabled", "false") + .jvmArg("-Djdk.attach.allowAttachSelf=true") + .jvmArg("-XX:+EnableDynamicAgentLoading") + .jvmArg("--patch-module=java.base=lib/entitlement-bridge/" + ENTITLEMENT_BRIDGE_JAR_NAME) + .jvmArg("--add-exports=java.base/org.elasticsearch.entitlement.bridge=org.elasticsearch.entitlement") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testCheckSystemExit() { + var exception = expectThrows( + IOException.class, + () -> { client().performRequest(new Request("GET", "/_entitlement/_check_system_exit")); } + ); + assertThat(exception.getMessage(), containsString("not_entitled_exception")); + } +} diff --git a/qa/entitlements/src/main/java/module-info.java b/qa/entitlements/src/main/java/module-info.java new file mode 100644 index 0000000000000..cf33ff95d834c --- /dev/null +++ b/qa/entitlements/src/main/java/module-info.java @@ -0,0 +1,5 @@ +module elasticsearch.qa.entitlements { + requires org.elasticsearch.server; + requires org.elasticsearch.base; + requires org.apache.logging.log4j; +} diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java new file mode 100644 index 0000000000000..f3821c065eceb --- /dev/null +++ b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/EntitlementsCheckPlugin.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ +package org.elasticsearch.test.entitlements; + +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class EntitlementsCheckPlugin extends Plugin implements ActionPlugin { + + @Override + @SuppressForbidden(reason = "Specifically testing System.exit") + public List getRestHandlers( + final Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + final RestController restController, + final ClusterSettings clusterSettings, + final IndexScopedSettings indexScopedSettings, + final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return Collections.singletonList(new RestEntitlementsCheckSystemExitAction()); + } +} diff --git a/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java new file mode 100644 index 0000000000000..692c8728cbda0 --- /dev/null +++ b/qa/entitlements/src/main/java/org/elasticsearch/test/entitlements/RestEntitlementsCheckSystemExitAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.test.entitlements; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestEntitlementsCheckSystemExitAction extends BaseRestHandler { + + private static final Logger logger = LogManager.getLogger(RestEntitlementsCheckSystemExitAction.class); + + RestEntitlementsCheckSystemExitAction() {} + + @Override + public List routes() { + return List.of(new Route(GET, "/_entitlement/_check_system_exit")); + } + + @Override + public String getName() { + return "check_system_exit_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + logger.info("RestEntitlementsCheckSystemExitAction rest handler"); + return channel -> { + logger.info("Calling System.exit(123);"); + System.exit(123); + }; + } +} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 236baf89a04e9..2a83f749e7d33 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -200,9 +200,11 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { ); if (Boolean.parseBoolean(System.getProperty("es.entitlements.enabled"))) { + logger.info("Bootstrapping Entitlements"); EntitlementBootstrap.bootstrap(); } else { // install SM after natives, shutdown hooks, etc. + logger.info("Bootstrapping java SecurityManager"); org.elasticsearch.bootstrap.Security.configure( nodeEnv, SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()), From 103a8b0960f5542367adf84fde3b3f1896e6c225 Mon Sep 17 00:00:00 2001 From: Niels Bauman <33722607+nielsbauman@users.noreply.github.com> Date: Wed, 13 Nov 2024 06:33:14 -0300 Subject: [PATCH 73/95] Avoid ignoring yaml tests for retrieving index templates (#116446) The `skip` caused the tests to be ignored instead of included. 
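In the YAML test runner the two prefixes have opposite semantics: `skip` excludes a test when its condition matches, while `requires` runs it only when the condition matches, so a version gate belongs under `requires`. A minimal sketch of the difference (hypothetical feature name):

```yaml
# skipped on clusters that HAVE the feature
- skip:
    cluster_features: [ "some_feature" ]
    reason: "not applicable once some_feature exists"

# runs only on clusters that HAVE the feature
- requires:
    cluster_features: [ "some_feature" ]
    reason: "needs some_feature"
```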
--- .../test/indices.get_index_template/10_basic.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml index 2079c01079ce1..c47df413df9e7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_index_template/10_basic.yml @@ -1,8 +1,8 @@ setup: - - skip: + - requires: cluster_features: [ "gte_v7.8.0" ] reason: "index template v2 API unavailable before 7.8" - features: allowed_warnings + test_runner_features: allowed_warnings - do: allowed_warnings: @@ -92,10 +92,9 @@ setup: --- "Add data stream lifecycle": - - skip: + - requires: cluster_features: ["gte_v8.11.0"] reason: "Data stream lifecycle in index templates was updated after 8.10" - features: allowed_warnings - do: allowed_warnings: @@ -127,10 +126,9 @@ setup: --- "Get data stream lifecycle with default rollover": - - skip: + - requires: cluster_features: ["gte_v8.11.0"] reason: "Data stream lifecycle in index templates was updated after 8.10" - features: allowed_warnings - do: allowed_warnings: From 799e1a750e2892f4257d7011dd2f7582b94f20bd Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Wed, 13 Nov 2024 10:06:39 +0000 Subject: [PATCH 74/95] Remove BWC in `UpdateWatcherSettingsAction` (#116686) This removes code which was providing transport compatibility with pre-8.15 nodes, which is not needed for 9.0. At this point, the `readFrom` method is a one-liner, so it is inlined, making the constructor public (which is more conventional). --- .../put/UpdateWatcherSettingsAction.java | 22 ++----------------- .../TransportUpdateWatcherSettingsAction.java | 2 +- 2 files changed, 3 insertions(+), 21 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java index 7b0bd8a8108e9..815f6f0741440 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.core.watcher.transport.actions.put; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ValidateActions; @@ -17,7 +16,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.Map; @@ -56,30 +54,14 @@ public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, Map Date: Wed, 13 Nov 2024 11:27:04 +0100 Subject: [PATCH 75/95] ESQL: optimise aggregations filtered by false/null into evals (#115858) This adds a new optimiser rule to extract aggregate functions filtered by a `FALSE` or `NULL` into evals. The value taken by the evaluation is `0L`, for `COUNT()` and `COUNT_DISTINCT()`, `NULL` otherwise. Example: ``` ... 
| STATS x = someAgg(y) WHERE FALSE {BY z} | ... => ... | STATS x = someAgg(y) {BY z} | EVAL x = NULL | KEEP x{, z} | ... ``` Related: #114352. --- docs/changelog/115858.yaml | 5 + .../src/main/resources/stats.csv-spec | 110 +++++++ .../optimizer/LocalLogicalPlanOptimizer.java | 27 +- .../esql/optimizer/LogicalPlanOptimizer.java | 2 + .../ReplaceStatsFilteredAggWithEval.java | 88 ++++++ .../xpack/esql/rule/RuleExecutor.java | 4 + .../optimizer/LogicalPlanOptimizerTests.java | 276 ++++++++++++++++++ 7 files changed, 505 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115858.yaml create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsFilteredAggWithEval.java diff --git a/docs/changelog/115858.yaml b/docs/changelog/115858.yaml new file mode 100644 index 0000000000000..0c0408fa656f8 --- /dev/null +++ b/docs/changelog/115858.yaml @@ -0,0 +1,5 @@ +pr: 115858 +summary: "ESQL: optimise aggregations filtered by false/null into evals" +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 448ee57b34c58..96aa779ad38c3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2382,6 +2382,116 @@ max:integer |max_a:integer|min:integer | min_a:integer 74999 |null |25324 | null ; +statsWithAllFiltersFalse +required_capability: per_agg_filtering +from employees +| stats max = max(height.float) where false, + min = min(height.float) where to_string(null) == "abc", + count = count(height.float) where false, + count_distinct = count_distinct(salary) where to_string(null) == "def" +; + +max:double |min:double |count:long |count_distinct:long +null |null |0 |0 +; + +statsWithExpressionsAllFiltersFalse +required_capability: per_agg_filtering +from employees +| stats max = max(height.float + 1) where null, + count = count(height.float) + 2 where false, + mix = min(height.float + 1) + count_distinct(emp_no) + 2 where length(null) == 3 +; + +max:double |count:long |mix:double +null |2 |null +; + +statsWithFalseFilterAndGroup +required_capability: per_agg_filtering +from employees +| stats max = max(height.float + 1) where null, + count = count(height.float) + 2 where false + by job_positions +| sort job_positions +| limit 4 +; + +max:double |count:long |job_positions:keyword +null |2 |Accountant +null |2 |Architect +null |2 |Business Analyst +null |2 |Data Scientist +; + +statsWithFalseFiltersAndGroups +required_capability: per_agg_filtering +from employees +| eval my_length = length(concat(first_name, null)) +| stats count_distinct = count_distinct(height.float + 1) where null, + count = count(height.float) + 2 where false, + values = values(first_name) where my_length > 3 + by job_positions, is_rehired +| sort job_positions, is_rehired +| limit 10 +; + +count_distinct:long |count:long |values:keyword |job_positions:keyword |is_rehired:boolean +0 |2 |null |Accountant |false +0 |2 |null |Accountant |true +0 |2 |null |Accountant |null +0 |2 |null |Architect |false +0 |2 |null |Architect |true +0 |2 |null |Architect |null +0 |2 |null |Business Analyst |false +0 |2 |null |Business Analyst |true +0 |2 |null |Business Analyst |null +0 |2 |null |Data Scientist |false +; + +statsWithMixedFiltersAndGroup +required_capability: per_agg_filtering +from employees +| eval my_length = 
length(concat(first_name, null)) +| stats count = count(my_length) where false, + values = mv_slice(mv_sort(values(first_name)), 0, 1) + by job_positions +| sort job_positions +| limit 4 +; + +count:long |values:keyword |job_positions:keyword +0 |[Arumugam, Bojan] |Accountant +0 |[Alejandro, Charlene] |Architect +0 |[Basil, Breannda] |Business Analyst +0 |[Berni, Breannda] |Data Scientist +; + +prunedStatsFollowedByStats +from employees +| eval my_length = length(concat(first_name, null)) +| stats count = count(my_length) where false, + values = mv_slice(values(first_name), 0, 1) where my_length > 0 +| stats count_distinct = count_distinct(count) +; + +count_distinct:long +1 +; + +statsWithFalseFiltersFromRow +required_capability: per_agg_filtering +row x = null, a = 1, b = [2,3,4] +| stats c=max(a) where x + by b +; + +c:integer |b:integer +null |2 +null |3 +null |4 +; + statsWithBasicExpressionFiltered required_capability: per_agg_filtering from employees diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java index 44334ff112bad..3da07e9485af7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.xpack.esql.optimizer.rules.logical.PropagateEmptyRelation; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsFilteredAggWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.InferIsNotNull; import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.InferNonNullAggConstraint; import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.LocalPropagateEmptyRelation; @@ -15,6 +16,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.local.ReplaceTopNWithLimitAndSort; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.rule.ParameterizedRuleExecutor; +import org.elasticsearch.xpack.esql.rule.Rule; import java.util.ArrayList; import java.util.List; @@ -50,20 +52,31 @@ protected List> batches() { rules.add(local); // TODO: if the local rules haven't touched the tree, the rest of the rules can be skipped rules.addAll(asList(operators(), cleanup())); - replaceRules(rules); - return rules; + return replaceRules(rules); } + @SuppressWarnings("unchecked") private List> replaceRules(List> listOfRules) { - for (Batch batch : listOfRules) { + List> newBatches = new ArrayList<>(listOfRules.size()); + for (var batch : listOfRules) { var rules = batch.rules(); - for (int i = 0; i < rules.length; i++) { - if (rules[i] instanceof PropagateEmptyRelation) { - rules[i] = new LocalPropagateEmptyRelation(); + List> newRules = new ArrayList<>(rules.length); + boolean updated = false; + for (var r : rules) { + if (r instanceof PropagateEmptyRelation) { + newRules.add(new LocalPropagateEmptyRelation()); + updated = true; + } else if (r instanceof ReplaceStatsFilteredAggWithEval) { + // skip it: once a fragment contains an Agg, this can no longer be pruned, which the rule can do + updated = true; + } else { + newRules.add(r); } } + batch = updated ? 
batch.with(newRules.toArray(Rule[]::new)) : batch; + newBatches.add(batch); } - return listOfRules; + return newBatches; } public LogicalPlan localOptimize(LogicalPlan plan) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 77c5a494437ab..a0e257d1a8953 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -46,6 +46,7 @@ import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceLimitAndSortAsTopN; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceOrderByExpressionWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceRegexMatch; +import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceStatsFilteredAggWithEval; import org.elasticsearch.xpack.esql.optimizer.rules.logical.ReplaceTrivialTypeConversions; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SetAsOptimized; import org.elasticsearch.xpack.esql.optimizer.rules.logical.SimplifyComparisonsArithmetics; @@ -170,6 +171,7 @@ protected static Batch operators() { new CombineBinaryComparisons(), new CombineDisjunctions(), new SimplifyComparisonsArithmetics(DataType::areCompatible), + new ReplaceStatsFilteredAggWithEval(), // prune/elimination new PruneFilters(), new PruneColumns(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsFilteredAggWithEval.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsFilteredAggWithEval.java new file mode 100644 index 0000000000000..2cafcc2e07052 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/ReplaceStatsFilteredAggWithEval.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.optimizer.rules.logical; + +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.expression.Literal; +import org.elasticsearch.xpack.esql.core.expression.NamedExpression; +import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; +import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct; +import org.elasticsearch.xpack.esql.plan.logical.Aggregate; +import org.elasticsearch.xpack.esql.plan.logical.Eval; +import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; +import org.elasticsearch.xpack.esql.plan.logical.Project; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; +import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; + +import java.util.ArrayList; +import java.util.List; + +/** + * Replaces an aggregation function having a false/null filter with an EVAL node. + *
+ * <pre>
+ *     ... | STATS x = someAgg(y) WHERE FALSE {BY z} | ...
+ *     =>
+ *     ... | STATS x = someAgg(y) {BY z} | EVAL x = NULL | KEEP x{, z} | ...
+ * </pre>
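+ * <p>
+ * As a concrete sketch (column names hypothetical): {@code STATS c = COUNT(x) WHERE false BY g}
+ * becomes {@code STATS BY g | EVAL c = 0 | KEEP c, g}. {@code COUNT} and {@code COUNT_DISTINCT}
+ * fold to the constant {@code 0}; every other aggregate folds to {@code NULL} typed like the
+ * original aggregate function, as implemented below.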
+ */ +public class ReplaceStatsFilteredAggWithEval extends OptimizerRules.OptimizerRule { + @Override + protected LogicalPlan rule(Aggregate aggregate) { + int oldAggSize = aggregate.aggregates().size(); + List newAggs = new ArrayList<>(oldAggSize); + List newEvals = new ArrayList<>(oldAggSize); + List newProjections = new ArrayList<>(oldAggSize); + + for (var ne : aggregate.aggregates()) { + if (ne instanceof Alias alias + && alias.child() instanceof AggregateFunction aggFunction + && aggFunction.hasFilter() + && aggFunction.filter() instanceof Literal literal + && Boolean.FALSE.equals(literal.fold())) { + + Object value = aggFunction instanceof Count || aggFunction instanceof CountDistinct ? 0L : null; + Alias newAlias = alias.replaceChild(Literal.of(aggFunction, value)); + newEvals.add(newAlias); + newProjections.add(newAlias.toAttribute()); + } else { + newAggs.add(ne); // agg function unchanged or grouping key + newProjections.add(ne.toAttribute()); + } + } + + LogicalPlan plan = aggregate; + if (newEvals.isEmpty() == false) { + if (newAggs.isEmpty()) { // the Aggregate node is pruned + plan = localRelation(aggregate.source(), newEvals); + } else { + plan = aggregate.with(aggregate.child(), aggregate.groupings(), newAggs); + plan = new Eval(aggregate.source(), plan, newEvals); + plan = new Project(aggregate.source(), plan, newProjections); + } + } + return plan; + } + + private static LocalRelation localRelation(Source source, List newEvals) { + Block[] blocks = new Block[newEvals.size()]; + List attributes = new ArrayList<>(newEvals.size()); + for (int i = 0; i < newEvals.size(); i++) { + Alias alias = newEvals.get(i); + attributes.add(alias.toAttribute()); + blocks[i] = BlockUtils.constantBlock(PlannerUtils.NON_BREAKING_BLOCK_FACTORY, ((Literal) alias.child()).value(), 1); + } + return new LocalRelation(source, attributes, LocalSupplier.of(blocks)); + + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java index 3d73c0d45e9a0..7df5a029d724e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/rule/RuleExecutor.java @@ -68,6 +68,10 @@ public String name() { return name; } + public Batch with(Rule[] rules) { + return new Batch<>(name, limit, rules); + } + public Rule[] rules() { return rules; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index fdc4935d457e9..d9a0f9ad57fb1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.compute.aggregation.QuantileStates; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongVectorBlock; import org.elasticsearch.core.Tuple; import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.index.IndexMode; @@ -148,6 +150,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.GEO_POINT; import static org.elasticsearch.xpack.esql.core.type.DataType.INTEGER; 
import static org.elasticsearch.xpack.esql.core.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.esql.core.type.DataType.LONG; import static org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation.EQ; import static org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation.GT; import static org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation.GTE; @@ -166,6 +169,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; //@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") @@ -564,6 +568,278 @@ public void testStatsWithFilteringDefaultAliasing() { assertThat(Expressions.names(agg.aggregates()), contains("sum(salary)", "sum(salary) WheRe last_name == \"Doe\"")); } + /* + * Limit[1000[INTEGER]] + * \_LocalRelation[[sum(salary) where false{r}#26],[ConstantNullBlock[positions=1]]] + */ + public void testReplaceStatsFilteredAggWithEvalSingleAgg() { + var plan = plan(""" + from test + | stats sum(salary) where false + """); + + var project = as(plan, Limit.class); + var source = as(project.child(), LocalRelation.class); + assertThat(Expressions.names(source.output()), contains("sum(salary) where false")); + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + assertThat(blocks[0].getPositionCount(), is(1)); + assertTrue(blocks[0].areAllValuesNull()); + } + + /* + * Project[[sum(salary) + 1 where false{r}#68]] + * \_Eval[[$$SUM$sum(salary)_+_1$0{r$}#79 + 1[INTEGER] AS sum(salary) + 1 where false]] + * \_Limit[1000[INTEGER]] + * \_LocalRelation[[$$SUM$sum(salary)_+_1$0{r$}#79],[ConstantNullBlock[positions=1]]] + */ + public void testReplaceStatsFilteredAggWithEvalSingleAggWithExpression() { + var plan = plan(""" + from test + | stats sum(salary) + 1 where false + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("sum(salary) + 1 where false")); + + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(1)); + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(alias.name(), is("sum(salary) + 1 where false")); + var add = as(alias.child(), Add.class); + var literal = as(add.right(), Literal.class); + assertThat(literal.fold(), is(1)); + + var limit = as(eval.child(), Limit.class); + var source = as(limit.child(), LocalRelation.class); + + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + assertThat(blocks[0].getPositionCount(), is(1)); + assertTrue(blocks[0].areAllValuesNull()); + } + + /* + * Project[[sum(salary) + 1 where false{r}#4, sum(salary) + 2{r}#6, emp_no{f}#7]] + * \_Eval[[null[LONG] AS sum(salary) + 1 where false, $$SUM$sum(salary)_+_2$1{r$}#18 + 2[INTEGER] AS sum(salary) + 2]] + * \_Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[emp_no{f}#7],[SUM(salary{f}#12,true[BOOLEAN]) AS $$SUM$sum(salary)_+_2$1, emp_no{f}#7]] + * \_EsRelation[test][_meta_field{f}#13, emp_no{f}#7, first_name{f}#8, ge..] 
+ */ + public void testReplaceStatsFilteredAggWithEvalMixedFilterAndNoFilter() { + var plan = plan(""" + from test + | stats sum(salary) + 1 where false, + sum(salary) + 2 + by emp_no + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("sum(salary) + 1 where false", "sum(salary) + 2", "emp_no")); + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(2)); + + var alias = as(eval.fields().getFirst(), Alias.class); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(LONG)); + + alias = as(eval.fields().getLast(), Alias.class); + assertThat(Expressions.name(alias.child()), containsString("sum(salary) + 2")); + + var limit = as(eval.child(), Limit.class); + var aggregate = as(limit.child(), Aggregate.class); + var source = as(aggregate.child(), EsRelation.class); + } + + /* + * Project[[sum(salary) + 1 where false{r}#3, sum(salary) + 3{r}#5, sum(salary) + 2 where false{r}#7]] + * \_Eval[[null[LONG] AS sum(salary) + 1 where false, $$SUM$sum(salary)_+_3$1{r$}#19 + 3[INTEGER] AS sum(salary) + 3, nu + * ll[LONG] AS sum(salary) + 2 where false]] + * \_Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[],[SUM(salary{f}#13,true[BOOLEAN]) AS $$SUM$sum(salary)_+_3$1]] + * \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..] + */ + public void testReplaceStatsFilteredAggWithEvalFilterFalseAndNull() { + var plan = plan(""" + from test + | stats sum(salary) + 1 where false, + sum(salary) + 3, + sum(salary) + 2 where null + """); + + var project = as(plan, Project.class); + assertThat( + Expressions.names(project.projections()), + contains("sum(salary) + 1 where false", "sum(salary) + 3", "sum(salary) + 2 where null") + ); + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(3)); + + var alias = as(eval.fields().getFirst(), Alias.class); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(LONG)); + + alias = as(eval.fields().get(1), Alias.class); + assertThat(Expressions.name(alias.child()), containsString("sum(salary) + 3")); + + alias = as(eval.fields().getLast(), Alias.class); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(LONG)); + + var limit = as(eval.child(), Limit.class); + var aggregate = as(limit.child(), Aggregate.class); + var source = as(aggregate.child(), EsRelation.class); + } + + /* + * Limit[1000[INTEGER]] + * \_LocalRelation[[count(salary) where false{r}#3],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + public void testReplaceStatsFilteredAggWithEvalCount() { + var plan = plan(""" + from test + | stats count(salary) where false + """); + + var limit = as(plan, Limit.class); + var source = as(limit.child(), LocalRelation.class); + assertThat(Expressions.names(source.output()), contains("count(salary) where false")); + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + var block = as(blocks[0], LongVectorBlock.class); + assertThat(block.getPositionCount(), is(1)); + assertThat(block.asVector().getLong(0), is(0L)); + } + + /* + * Project[[count_distinct(salary + 2) + 3 where false{r}#3]] + * \_Eval[[$$COUNTDISTINCT$count_distinct(>$0{r$}#15 + 3[INTEGER] AS count_distinct(salary + 2) + 3 where false]] + * \_Limit[1000[INTEGER]] + * 
\_LocalRelation[[$$COUNTDISTINCT$count_distinct(>$0{r$}#15],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + public void testReplaceStatsFilteredAggWithEvalCountDistinctInExpression() { + var plan = plan(""" + from test + | stats count_distinct(salary + 2) + 3 where false + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("count_distinct(salary + 2) + 3 where false")); + + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(1)); + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(alias.name(), is("count_distinct(salary + 2) + 3 where false")); + var add = as(alias.child(), Add.class); + var literal = as(add.right(), Literal.class); + assertThat(literal.fold(), is(3)); + + var limit = as(eval.child(), Limit.class); + var source = as(limit.child(), LocalRelation.class); + + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + var block = as(blocks[0], LongVectorBlock.class); + assertThat(block.getPositionCount(), is(1)); + assertThat(block.asVector().getLong(0), is(0L)); + } + + /* + * Project[[max{r}#91, max_a{r}#94, min{r}#97, min_a{r}#100, emp_no{f}#101]] + * \_Eval[[null[INTEGER] AS max_a, null[INTEGER] AS min_a]] + * \_Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[emp_no{f}#101],[MAX(salary{f}#106,true[BOOLEAN]) AS max, MIN(salary{f}#106,true[BOOLEAN]) AS min, emp_ + * no{f}#101]] + * \_EsRelation[test][_meta_field{f}#107, emp_no{f}#101, first_name{f}#10..] + */ + public void testReplaceStatsFilteredAggWithEvalSameAggWithAndWithoutFilter() { + var plan = plan(""" + from test + | stats max = max(salary), max_a = max(salary) where null, + min = min(salary), min_a = min(salary) where to_string(null) == "abc" + by emp_no + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("max", "max_a", "min", "min_a", "emp_no")); + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(2)); + + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(Expressions.name(alias), containsString("max_a")); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(INTEGER)); + + alias = as(eval.fields().getLast(), Alias.class); + assertThat(Expressions.name(alias), containsString("min_a")); + assertTrue(alias.child().foldable()); + assertThat(alias.child().fold(), nullValue()); + assertThat(alias.child().dataType(), is(INTEGER)); + + var limit = as(eval.child(), Limit.class); + + var aggregate = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(aggregate.aggregates()), contains("max", "min", "emp_no")); + + var source = as(aggregate.child(), EsRelation.class); + } + + /* + * Limit[1000[INTEGER]] + * \_LocalRelation[[count{r}#7],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100634") // i.e. 
PropagateEvalFoldables applicability to Aggs + public void testReplaceStatsFilteredAggWithEvalFilterUsingEvaledValue() { + var plan = plan(""" + from test + | eval my_length = length(concat(first_name, null)) + | stats count = count(my_length) where my_length > 0 + """); + + var limit = as(plan, Limit.class); + var source = as(limit.child(), LocalRelation.class); + assertThat(Expressions.names(source.output()), contains("count")); + Block[] blocks = source.supplier().get(); + assertThat(blocks.length, is(1)); + var block = as(blocks[0], LongVectorBlock.class); + assertThat(block.getPositionCount(), is(1)); + assertThat(block.asVector().getLong(0), is(0L)); + } + + /* + * Project[[c{r}#67, emp_no{f}#68]] + * \_Eval[[0[LONG] AS c]] + * \_Limit[1000[INTEGER]] + * \_Aggregate[STANDARD,[emp_no{f}#68],[emp_no{f}#68]] + * \_EsRelation[test][_meta_field{f}#74, emp_no{f}#68, first_name{f}#69, ..] + */ + public void testReplaceStatsFilteredAggWithEvalSingleAggWithGroup() { + var plan = plan(""" + from test + | stats c = count(emp_no) where false + by emp_no + """); + + var project = as(plan, Project.class); + assertThat(Expressions.names(project.projections()), contains("c", "emp_no")); + + var eval = as(project.child(), Eval.class); + assertThat(eval.fields().size(), is(1)); + var alias = as(eval.fields().getFirst(), Alias.class); + assertThat(Expressions.name(alias), containsString("c")); + + var limit = as(eval.child(), Limit.class); + + var aggregate = as(limit.child(), Aggregate.class); + assertThat(Expressions.names(aggregate.aggregates()), contains("emp_no")); + + var source = as(aggregate.child(), EsRelation.class); + } + public void testQlComparisonOptimizationsApply() { var plan = plan(""" from test From bf67e237b6c611d7ac55f8a775adf64724caa24a Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 13 Nov 2024 11:31:29 +0100 Subject: [PATCH 76/95] Fix TranslogDeletionPolicy when assertions are disabled (#116654) Current code causes a NPE when assertions are disabled: the openTranslogRef is only non-null when assertions are enabled. --- .../elasticsearch/index/translog/TranslogDeletionPolicy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java index 6ac7313a1c51b..2700cba0abc3c 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicy.java @@ -24,7 +24,7 @@ public final class TranslogDeletionPolicy { private final Map openTranslogRef; public void assertNoOpenTranslogRefs() { - if (openTranslogRef.isEmpty() == false) { + if (Assertions.ENABLED && openTranslogRef.isEmpty() == false) { AssertionError e = new AssertionError("not all translog generations have been released"); openTranslogRef.values().forEach(e::addSuppressed); throw e; From 9584d10078d156e62736ad58aea1985252b889d4 Mon Sep 17 00:00:00 2001 From: Dimitris Rempapis Date: Wed, 13 Nov 2024 12:50:39 +0200 Subject: [PATCH 77/95] _validate request does not honour ignore_unavailable (#116656) The IndicesOption has been updated into the ValidateQueryRequest to encapsulate the following logic. If we target a closed index and ignore_unavailable=false, we get an IndexClosedException, otherwise if the request contains ignore_unavailable=true, we safely skip the closed index. 
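For illustration, a minimal request sketch (index names hypothetical): with
`logs-closed` closed,

    GET /logs-open,logs-closed/_validate/query?ignore_unavailable=true
    { "query": { "match_all": {} } }

now skips the closed index and validates against `logs-open` only, while the
same request without `ignore_unavailable=true` (the default is `false`) fails
with an IndexClosedException.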
--- docs/changelog/116656.yaml | 6 ++ .../indices/IndicesOptionsIntegrationIT.java | 4 +- .../validate/SimpleValidateQueryIT.java | 60 ++++++++++++++++--- .../validate/query/ValidateQueryRequest.java | 2 +- 4 files changed, 62 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/116656.yaml diff --git a/docs/changelog/116656.yaml b/docs/changelog/116656.yaml new file mode 100644 index 0000000000000..eb5d5a1cfc201 --- /dev/null +++ b/docs/changelog/116656.yaml @@ -0,0 +1,6 @@ +pr: 116656 +summary: _validate does not honour ignore_unavailable +area: Search +type: bug +issues: + - 116594 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index f51dd87e8eeff..f41277c5b80ca 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -287,7 +287,7 @@ public void testWildcardBehaviour() throws Exception { verify(indicesStats(indices), false); verify(forceMerge(indices), false); verify(refreshBuilder(indices), false); - verify(validateQuery(indices), true); + verify(validateQuery(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); @@ -338,7 +338,7 @@ public void testWildcardBehaviour() throws Exception { verify(indicesStats(indices), false); verify(forceMerge(indices), false); verify(refreshBuilder(indices), false); - verify(validateQuery(indices), true); + verify(validateQuery(indices), false); verify(getAliases(indices), false); verify(getFieldMapping(indices), false); verify(getMapping(indices), false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 37d2f4e1a9387..388421b6dd53f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -9,16 +9,18 @@ package org.elasticsearch.validate; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.TermsLookup; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -207,12 +209,8 @@ public void testExplainDateRangeInQueryString() { } public void testValidateEmptyCluster() { - try { - indicesAdmin().prepareValidateQuery().get(); - fail("Expected IndexNotFoundException"); - } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no 
such index [_all] and no indices exist")); - } + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery().get(); + assertThat(response.getTotalShards(), is(0)); } public void testExplainNoQuery() { @@ -379,4 +377,52 @@ public void testExplainTermsQueryWithLookup() { ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("twitter").setQuery(termsLookupQuery).setExplain(true).get(); assertThat(response.isValid(), is(true)); } + + public void testOneClosedIndex() { + createIndex("test"); + + boolean ignoreUnavailable = false; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test")).actionGet(); + IndexClosedException ex = expectThrows( + IndexClosedException.class, + indicesAdmin().prepareValidateQuery("test").setIndicesOptions(options) + ); + assertEquals("closed", ex.getMessage()); + } + + public void testOneClosedIndexIgnoreUnavailable() { + createIndex("test"); + + boolean ignoreUnavailable = true; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test")).actionGet(); + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("test").setIndicesOptions(options).get(); + assertThat(response.getTotalShards(), is(0)); + } + + public void testTwoIndicesOneClosed() { + createIndex("test1"); + createIndex("test2"); + + boolean ignoreUnavailable = false; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test1")).actionGet(); + IndexClosedException ex = expectThrows( + IndexClosedException.class, + indicesAdmin().prepareValidateQuery("test1", "test2").setIndicesOptions(options) + ); + assertEquals("closed", ex.getMessage()); + } + + public void testTwoIndicesOneClosedIgnoreUnavailable() { + createIndex("test1"); + createIndex("test2"); + + boolean ignoreUnavailable = true; + IndicesOptions options = IndicesOptions.fromOptions(ignoreUnavailable, true, true, false, true, true, false, false); + client().admin().indices().close(new CloseIndexRequest("test1")).actionGet(); + ValidateQueryResponse response = indicesAdmin().prepareValidateQuery("test1", "test2").setIndicesOptions(options).get(); + assertThat(response.getTotalShards(), is(1)); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 4c3f32240ca8c..f30206c1d238a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -32,7 +32,7 @@ */ public final class ValidateQueryRequest extends BroadcastRequest implements ToXContentObject { - public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, false); + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed(); private QueryBuilder query = new MatchAllQueryBuilder(); From 126cf6c0a8f09196c9204a7c0d464b9fa7c18e0a Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 13 Nov 2024 11:48:05 +0000 Subject: [PATCH 78/95] Remove java.time date-time parsing 
fallback (#116572) Only use the ISO date-time parser now --- .../test/data_stream/150_tsdb.yml | 8 +- .../common/time/DateFormatters.java | 467 ++++-------------- .../common/time/DateFormattersTests.java | 8 +- .../org/elasticsearch/test/ESTestCase.java | 5 - 4 files changed, 116 insertions(+), 372 deletions(-) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index de5cf3baa744e..3fbf85ab1e702 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -244,17 +244,17 @@ TSDB failures go to failure store: refresh: true body: - '{ "create": { "_index": "fs-k8s"} }' - - '{"@timestamp":"2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"@timestamp":"2021-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - '{ "create": { "_index": "k8s"} }' - - '{ "@timestamp": "2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "@timestamp": "2021-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - '{ "create": { "_index": "fs-k8s"} }' - '{ "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - '{ "create": { "_index": "fs-k8s"} }' - - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "@timestamp":"2000-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - '{ "create": { "_index": "k8s"} }' - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - '{ "create": { "_index": "k8s"} }' - - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "@timestamp":"2000-04-28T01:00:00Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - is_true: errors # Successfully indexed to backing index diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index fc3674a6016aa..48a764826bad2 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -10,10 +10,7 @@ package 
org.elasticsearch.common.time; import org.elasticsearch.common.Strings; -import org.elasticsearch.core.Booleans; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.logging.internal.spi.LoggerFactory; import java.time.Instant; import java.time.LocalDate; @@ -45,31 +42,9 @@ import static java.time.temporal.ChronoField.MONTH_OF_YEAR; import static java.time.temporal.ChronoField.NANO_OF_SECOND; import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; -import static org.elasticsearch.common.util.ArrayUtils.prepend; public class DateFormatters { - /** - * The ISO8601 parser is as close as possible to the java.time based parsers, but there are some strings - * that are no longer accepted (multiple fractional seconds, or multiple timezones) by the ISO parser. - * If a string cannot be parsed by the ISO parser, it then tries the java.time one. - * If there's lots of these strings, trying the ISO parser, then the java.time parser, might cause a performance drop. - * So provide a JVM option so that users can just use the java.time parsers, if they really need to. - *
-     * <p>
- * Note that this property is sometimes set by {@code ESTestCase.setTestSysProps} to flip between implementations in tests, - * to ensure both are fully tested - */ - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // evaluate if we need to deprecate/remove this - private static final boolean JAVA_TIME_PARSERS_ONLY = Booleans.parseBoolean(System.getProperty("es.datetime.java_time_parsers"), false); - - static { - // when this is used directly in tests ES logging may not have been initialized yet - LoggerFactory logger; - if (JAVA_TIME_PARSERS_ONLY && (logger = LoggerFactory.provider()) != null) { - logger.getLogger(DateFormatters.class).info("Using java.time datetime parsers only"); - } - } - private static DateFormatter newDateFormatter(String format, DateTimeFormatter formatter) { return new JavaDateFormatter(format, new JavaTimeDateTimePrinter(formatter), new JavaTimeDateTimeParser(formatter)); } @@ -159,81 +134,14 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p .toFormatter(Locale.ROOT) .withResolverStyle(ResolverStyle.STRICT); - private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER = new DateTimeFormatterBuilder().append( - STRICT_YEAR_MONTH_DAY_FORMATTER - ) - .optionalStart() - .appendLiteral('T') - .optionalStart() - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .optionalEnd() - .optionalStart() - .appendLiteral(',') - .appendFraction(NANO_OF_SECOND, 1, 9, false) - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalStart() - .appendZoneOrOffsetId() - .optionalEnd() - .optionalStart() - .append(TIME_ZONE_FORMATTER_NO_COLON) - .optionalEnd() - .optionalEnd() - .optionalEnd() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); - /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional. */ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(STRICT_DATE_OPTIONAL_TIME_FORMATTER); - - STRICT_DATE_OPTIONAL_TIME = new JavaDateFormatter( - "strict_date_optional_time", - new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), - JAVA_TIME_PARSERS_ONLY - ? 
new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale( - Locale.ROOT - ), - javaTimeParser } - ); - } - - private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS = new DateTimeFormatterBuilder().append( - STRICT_YEAR_MONTH_DAY_FORMATTER - ) - .optionalStart() - .appendLiteral('T') - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .optionalEnd() - .optionalStart() - .appendLiteral(',') - .appendFraction(NANO_OF_SECOND, 1, 9, false) - .optionalEnd() - .optionalStart() - .appendZoneOrOffsetId() - .optionalEnd() - .optionalStart() - .append(TIME_ZONE_FORMATTER_NO_COLON) - .optionalEnd() - .optionalEnd() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME = new JavaDateFormatter( + "strict_date_optional_time", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale(Locale.ROOT) + ); private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS = new DateTimeFormatterBuilder().append( STRICT_YEAR_MONTH_DAY_PRINTER @@ -262,79 +170,28 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. */ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); - - STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter( - "strict_date_optional_time_nanos", - new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - true, - null, - DecimalSeparator.BOTH, - TimezonePresence.OPTIONAL - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter( + "strict_date_optional_time_nanos", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS), + new Iso8601DateTimeParser( + Set.of(HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + true, + null, + DecimalSeparator.BOTH, + TimezonePresence.OPTIONAL + ).withLocale(Locale.ROOT) + ); /** * Returns a ISO 8601 compatible date time formatter and parser. 
* This is not fully compatible to the existing spec, which would require far more edge cases, but merely compatible with the * existing legacy joda time ISO date formatter */ - private static final DateFormatter ISO_8601; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .optionalStart() - .appendLiteral('T') - .optionalStart() - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .optionalEnd() - .optionalStart() - .appendLiteral(",") - .appendFraction(NANO_OF_SECOND, 1, 9, false) - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalStart() - .appendZoneOrOffsetId() - .optionalEnd() - .optionalStart() - .append(TIME_ZONE_FORMATTER_NO_COLON) - .optionalEnd() - .optionalEnd() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); - - ISO_8601 = new JavaDateFormatter( - "iso8601", - new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale( - Locale.ROOT - ), - javaTimeParser } - ); - } + private static final DateFormatter ISO_8601 = new JavaDateFormatter( + "iso8601", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + new Iso8601DateTimeParser(Set.of(), false, null, DecimalSeparator.BOTH, TimezonePresence.OPTIONAL).withLocale(Locale.ROOT) + ); ///////////////////////////////////////// // @@ -755,53 +612,33 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p /* * A strict formatter that formats or parses a year and a month, such as '2011-12'. */ - private static final DateFormatter STRICT_YEAR_MONTH; - static { - DateTimeFormatter javaTimeFormatter = new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) - .appendLiteral("-") - .appendValue(MONTH_OF_YEAR, 2, 2, SignStyle.NOT_NEGATIVE) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); - - STRICT_YEAR_MONTH = new JavaDateFormatter( - "strict_year_month", - new JavaTimeDateTimePrinter(javaTimeFormatter), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR), - false, - MONTH_OF_YEAR, - DecimalSeparator.BOTH, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_YEAR_MONTH = new JavaDateFormatter( + "strict_year_month", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) + .appendLiteral("-") + .appendValue(MONTH_OF_YEAR, 2, 2, SignStyle.NOT_NEGATIVE) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new Iso8601DateTimeParser(Set.of(MONTH_OF_YEAR), false, MONTH_OF_YEAR, DecimalSeparator.BOTH, TimezonePresence.FORBIDDEN) + .withLocale(Locale.ROOT) + ); /* * A strict formatter that formats or parses a year, such as '2011'. 
*/ - private static final DateFormatter STRICT_YEAR; - static { - DateTimeFormatter javaTimeFormatter = new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT); - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); - - STRICT_YEAR = new JavaDateFormatter( - "strict_year", - new JavaTimeDateTimePrinter(javaTimeFormatter), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser(Set.of(), false, ChronoField.YEAR, DecimalSeparator.BOTH, TimezonePresence.FORBIDDEN) - .withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_YEAR = new JavaDateFormatter( + "strict_year", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().appendValue(ChronoField.YEAR, 4, 4, SignStyle.EXCEEDS_PAD) + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new Iso8601DateTimeParser(Set.of(), false, ChronoField.YEAR, DecimalSeparator.BOTH, TimezonePresence.FORBIDDEN).withLocale( + Locale.ROOT + ) + ); /* * A strict formatter that formats or parses a hour, minute and second, such as '09:43:25'. @@ -832,39 +669,17 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * Returns a formatter that combines a full date and time, separated by a 'T' * (uuuu-MM-dd'T'HH:mm:ss.SSSZZ). */ - private static final DateFormatter STRICT_DATE_TIME; - static { - DateTimeParser[] javaTimeParsers = new DateTimeParser[] { - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_FORMATTER) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ) }; - - STRICT_DATE_TIME = new JavaDateFormatter( - "strict_date_time", - new JavaTimeDateTimePrinter(STRICT_DATE_PRINTER), - JAVA_TIME_PARSERS_ONLY - ? javaTimeParsers - : prepend( - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - false, - null, - DecimalSeparator.DOT, - TimezonePresence.MANDATORY - ).withLocale(Locale.ROOT), - javaTimeParsers - ) - ); - } + private static final DateFormatter STRICT_DATE_TIME = new JavaDateFormatter( + "strict_date_time", + new JavaTimeDateTimePrinter(STRICT_DATE_PRINTER), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.MANDATORY + ).withLocale(Locale.ROOT) + ); private static final DateTimeFormatter STRICT_ORDINAL_DATE_TIME_NO_MILLIS_BASE = new DateTimeFormatterBuilder().appendValue( ChronoField.YEAR, @@ -907,44 +722,22 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * Returns a formatter that combines a full date and time without millis, * separated by a 'T' (uuuu-MM-dd'T'HH:mm:ssZZ). 
*/ - private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS; - static { - DateTimeParser[] javaTimeParsers = new DateTimeParser[] { - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - new JavaTimeDateTimeParser( - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ) }; - - STRICT_DATE_TIME_NO_MILLIS = new JavaDateFormatter( - "strict_date_time_no_millis", - new JavaTimeDateTimePrinter( - new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) - .appendOffset("+HH:MM", "Z") - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - JAVA_TIME_PARSERS_ONLY - ? javaTimeParsers - : prepend( - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - false, - SECOND_OF_MINUTE, - DecimalSeparator.BOTH, - TimezonePresence.MANDATORY - ).withLocale(Locale.ROOT), - javaTimeParsers - ) - ); - } + private static final DateFormatter STRICT_DATE_TIME_NO_MILLIS = new JavaDateFormatter( + "strict_date_time_no_millis", + new JavaTimeDateTimePrinter( + new DateTimeFormatterBuilder().append(STRICT_DATE_TIME_NO_MILLIS_FORMATTER) + .appendOffset("+HH:MM", "Z") + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + SECOND_OF_MINUTE, + DecimalSeparator.BOTH, + TimezonePresence.MANDATORY + ).withLocale(Locale.ROOT) + ); // NOTE: this is not a strict formatter to retain the joda time based behaviour, even though it's named like this private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER = new DateTimeFormatterBuilder().append( @@ -980,75 +773,41 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * two digit minute of hour, two digit second of minute, and three digit * fraction of second (uuuu-MM-dd'T'HH:mm:ss.SSS). */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( + "strict_date_hour_minute_second_fraction", + new JavaTimeDateTimePrinter( new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 9, true) + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) .toFormatter(Locale.ROOT) .withResolverStyle(ResolverStyle.STRICT) - ); - - STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter( - "strict_date_hour_minute_second_fraction", - new JavaTimeDateTimePrinter( - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - JAVA_TIME_PARSERS_ONLY - ? 
new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), - false, - null, - DecimalSeparator.DOT, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + ), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT) + ); - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS; - static { - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + "strict_date_hour_minute_second_millis", + new JavaTimeDateTimePrinter( new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 9, true) + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) .toFormatter(Locale.ROOT) .withResolverStyle(ResolverStyle.STRICT) - ); - - STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( - "strict_date_hour_minute_second_millis", - new JavaTimeDateTimePrinter( - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .appendLiteral("T") - .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ), - JAVA_TIME_PARSERS_ONLY - ? new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), - false, - null, - DecimalSeparator.DOT, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + ), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE, NANO_OF_SECOND), + false, + null, + DecimalSeparator.DOT, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT) + ); /* * Returns a formatter for a two digit hour of day. (HH) @@ -1362,27 +1121,17 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p * two digit minute of hour, and two digit second of * minute. (uuuu-MM-dd'T'HH:mm:ss) */ - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND; - static { - DateTimeFormatter javaTimeFormatter = DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT); - DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(javaTimeFormatter); - - STRICT_DATE_HOUR_MINUTE_SECOND = new JavaDateFormatter( - "strict_date_hour_minute_second", - new JavaTimeDateTimePrinter(javaTimeFormatter), - JAVA_TIME_PARSERS_ONLY - ? 
new DateTimeParser[] { javaTimeParser } - : new DateTimeParser[] { - new Iso8601DateTimeParser( - Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), - false, - SECOND_OF_MINUTE, - DecimalSeparator.BOTH, - TimezonePresence.FORBIDDEN - ).withLocale(Locale.ROOT), - javaTimeParser } - ); - } + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND = new JavaDateFormatter( + "strict_date_hour_minute_second", + new JavaTimeDateTimePrinter(DateTimeFormatter.ofPattern("uuuu-MM-dd'T'HH:mm:ss", Locale.ROOT)), + new Iso8601DateTimeParser( + Set.of(MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), + false, + SECOND_OF_MINUTE, + DecimalSeparator.BOTH, + TimezonePresence.FORBIDDEN + ).withLocale(Locale.ROOT) + ); /* * A basic formatter for a full date as four digit year, two digit diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index b197fc3d5dc25..b9755ba250f47 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -1244,16 +1244,16 @@ public void testStrictParsing() { assertParseException("2018-12-31T12:12:12", "strict_date_hour_minute_second_millis", 19); assertParseException("2018-12-31T12:12:12", "strict_date_hour_minute_second_fraction", 19); assertParses("2018-12-31", "strict_date_optional_time"); - assertParseException("2018-12-1", "strict_date_optional_time", 7); - assertParseException("2018-1-31", "strict_date_optional_time", 4); + assertParseException("2018-12-1", "strict_date_optional_time", 8); + assertParseException("2018-1-31", "strict_date_optional_time", 5); assertParseException("10000-01-31", "strict_date_optional_time", 4); assertParses("2010-01-05T02:00", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30Z", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30+0100", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30+01:00", "strict_date_optional_time"); - assertParseException("2018-12-31T10:15:3", "strict_date_optional_time", 16); - assertParseException("2018-12-31T10:5:30", "strict_date_optional_time", 13); + assertParseException("2018-12-31T10:15:3", "strict_date_optional_time", 17); + assertParseException("2018-12-31T10:5:30", "strict_date_optional_time", 14); assertParseException("2018-12-31T9:15:30", "strict_date_optional_time", 11); assertParses("2015-01-04T00:00Z", "strict_date_optional_time"); assertParses("2018-12-31T10:15:30.1Z", "strict_date_time"); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 1edc800956a67..207409dfcf751 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -435,11 +435,6 @@ private static void setTestSysProps(Random random) { // We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each // other if we allow them to set the number of available processors as it's set-once in Netty. 
System.setProperty("es.set.netty.runtime.available.processors", "false"); - - // sometimes use the java.time date formatters - if (random.nextBoolean()) { - System.setProperty("es.datetime.java_time_parsers", "true"); - } } protected final Logger logger = LogManager.getLogger(getClass()); From 31492f52a3ba4468ec533941db09943b2ec8e381 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 13 Nov 2024 13:04:12 +0100 Subject: [PATCH 79/95] [Gradle] Fix configuration cache for validateChangelogs task definition (#116716) --- build.gradle | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index a91347ca6e19b..71386a37cbb0d 100644 --- a/build.gradle +++ b/build.gradle @@ -420,8 +420,11 @@ gradle.projectsEvaluated { } } -tasks.named("validateChangelogs") { - onlyIf { project.gradle.startParameter.taskNames.any { it.startsWith("checkPart") || it == 'functionalTests' } == false } +tasks.named("validateChangelogs").configure { + def triggeredTaskNames = gradle.startParameter.taskNames + onlyIf { + triggeredTaskNames.any { it.startsWith("checkPart") || it == 'functionalTests' } == false + } } tasks.named("precommit") { From bada2a60ed8561d80cdfd61b28883b6a7002b023 Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Wed, 13 Nov 2024 14:14:56 +0100 Subject: [PATCH 80/95] Updates chunk settings documentation (#116719) --- docs/reference/mapping/types/semantic-text.asciidoc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/reference/mapping/types/semantic-text.asciidoc b/docs/reference/mapping/types/semantic-text.asciidoc index ac23c153e01a3..684ad7c369e7d 100644 --- a/docs/reference/mapping/types/semantic-text.asciidoc +++ b/docs/reference/mapping/types/semantic-text.asciidoc @@ -87,7 +87,7 @@ Trying to <> that is used on a [discrete] [[auto-text-chunking]] -==== Automatic text chunking +==== Text chunking {infer-cap} endpoints have a limit on the amount of text they can process. To allow for large amounts of text to be used in semantic search, `semantic_text` automatically generates smaller passages if needed, called _chunks_. @@ -95,8 +95,7 @@ To allow for large amounts of text to be used in semantic search, `semantic_text Each chunk will include the text subpassage and the corresponding embedding generated from it. When querying, the individual passages will be automatically searched for each document, and the most relevant passage will be used to compute a score. -Documents are split into 250-word sections with a 100-word overlap so that each section shares 100 words with the previous section. -This overlap ensures continuity and prevents vital contextual information in the input text from being lost by a hard break. +For more details on chunking and how to configure chunking settings, see <> in the Inference API documentation. 
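+For example, a sketch of sentence-based chunking configured when creating an
+{infer} endpoint (the endpoint name is hypothetical; the exact
+`chunking_settings` fields are defined in the {infer} API documentation):
+
+[source,console]
+------------------------------------------------------------
+PUT _inference/sparse_embedding/my-chunking-endpoint
+{
+  "service": "elasticsearch",
+  "service_settings": {
+    "model_id": ".elser_model_2",
+    "num_allocations": 1,
+    "num_threads": 1
+  },
+  "chunking_settings": {
+    "strategy": "sentence",
+    "max_chunk_size": 250,
+    "sentence_overlap": 1
+  }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+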
[discrete] From d1788af03f670c0d1a76cb6d9270c6dc61d1adad Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Wed, 13 Nov 2024 08:42:07 -0500 Subject: [PATCH 81/95] Update service-elser.asciidoc (#116272) --- docs/reference/inference/service-elser.asciidoc | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/reference/inference/service-elser.asciidoc b/docs/reference/inference/service-elser.asciidoc index 273d743e47a4b..262bdfbca002f 100644 --- a/docs/reference/inference/service-elser.asciidoc +++ b/docs/reference/inference/service-elser.asciidoc @@ -7,6 +7,12 @@ You can also deploy ELSER by using the <>. NOTE: The API request will automatically download and deploy the ELSER model if it isn't already downloaded. +[WARNING] +.Deprecated in 8.16 +==== +The elser service is deprecated and will be removed in a future release. +Use the <> instead, with model_id included in the service_settings. +==== [discrete] [[infer-service-elser-api-request]] @@ -173,4 +179,4 @@ PUT _inference/sparse_embedding/my-elser-model } } ------------------------------------------------------------ -// TEST[skip:TBD] \ No newline at end of file +// TEST[skip:TBD] From b23601108d0d092d7d1dcf22ede9aa7f8e97c86f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 14 Nov 2024 00:47:15 +1100 Subject: [PATCH 82/95] Mute org.elasticsearch.snapshots.SnapshotShutdownIT testRestartNodeDuringSnapshot #116730 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 53bbe4fbc1d22..fa7ce1509574e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -236,6 +236,9 @@ tests: - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT method: testSettingsApplied issue: https://github.com/elastic/elasticsearch/issues/116694 +- class: org.elasticsearch.snapshots.SnapshotShutdownIT + method: testRestartNodeDuringSnapshot + issue: https://github.com/elastic/elasticsearch/issues/116730 # Examples: # From 6325e46231c15da744b3ad28811279b03f1299d0 Mon Sep 17 00:00:00 2001 From: Vishal Raj Date: Wed, 13 Nov 2024 14:01:59 +0000 Subject: [PATCH 83/95] Add default ILM policies and switch to ILM for apm-data plugin (#115687) --- docs/changelog/115687.yaml | 5 ++ .../test/rest/ESRestTestCase.java | 21 +++++- .../logs-apm.app-fallback@ilm.yaml | 1 - .../logs-apm.error-fallback@ilm.yaml | 1 - .../metrics-apm.app-fallback@ilm.yaml | 1 - .../metrics-apm.internal-fallback@ilm.yaml | 1 - ....service_destination.10m-fallback@ilm.yaml | 1 - ...m.service_destination.1m-fallback@ilm.yaml | 1 - ....service_destination.60m-fallback@ilm.yaml | 1 - ...-apm.service_summary.10m-fallback@ilm.yaml | 1 - ...s-apm.service_summary.1m-fallback@ilm.yaml | 1 - ...-apm.service_summary.60m-fallback@ilm.yaml | 1 - ....service_transaction.10m-fallback@ilm.yaml | 1 - ...m.service_transaction.1m-fallback@ilm.yaml | 1 - ....service_transaction.60m-fallback@ilm.yaml | 1 - ...rics-apm.transaction.10m-fallback@ilm.yaml | 1 - ...trics-apm.transaction.1m-fallback@ilm.yaml | 1 - ...rics-apm.transaction.60m-fallback@ilm.yaml | 1 - .../traces-apm-fallback@ilm.yaml | 1 - .../traces-apm.rum-fallback@ilm.yaml | 1 - .../traces-apm.sampled-fallback@ilm.yaml | 1 - .../logs-apm.app_logs-default_policy.yaml | 16 +++++ .../logs-apm.error_logs-default_policy.yaml | 16 +++++ ...etrics-apm.app_metrics-default_policy.yaml | 16 +++++ ...s-apm.internal_metrics-default_policy.yaml | 16 
+++++ ...estination_10m_metrics-default_policy.yaml | 16 +++++ ...destination_1m_metrics-default_policy.yaml | 16 +++++ ...estination_60m_metrics-default_policy.yaml | 16 +++++ ...ce_summary_10m_metrics-default_policy.yaml | 16 +++++ ...ice_summary_1m_metrics-default_policy.yaml | 16 +++++ ...ce_summary_60m_metrics-default_policy.yaml | 16 +++++ ...ransaction_10m_metrics-default_policy.yaml | 16 +++++ ...transaction_1m_metrics-default_policy.yaml | 16 +++++ ...ransaction_60m_metrics-default_policy.yaml | 16 +++++ ...ransaction_10m_metrics-default_policy.yaml | 16 +++++ ...transaction_1m_metrics-default_policy.yaml | 16 +++++ ...ransaction_60m_metrics-default_policy.yaml | 16 +++++ .../traces-apm.rum_traces-default_policy.yaml | 16 +++++ ...ces-apm.sampled_traces-default_policy.yaml | 13 ++++ .../traces-apm.traces-default_policy.yaml | 16 +++++ .../src/main/resources/resources.yaml | 23 ++++++- .../APMIndexTemplateRegistryTests.java | 66 +++++++++++++++++-- .../xpack/core/ilm/LifecyclePolicyUtils.java | 30 ++++++--- .../core/template/YamlTemplateRegistry.java | 39 ++++++++++- 44 files changed, 468 insertions(+), 36 deletions(-) create mode 100644 docs/changelog/115687.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.app_logs-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.error_logs-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.app_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.internal_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_10m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_1m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_60m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_10m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_1m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_60m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_10m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_1m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_60m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_10m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_1m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_60m_metrics-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.rum_traces-default_policy.yaml create mode 100644 
x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.sampled_traces-default_policy.yaml create mode 100644 x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.traces-default_policy.yaml diff --git a/docs/changelog/115687.yaml b/docs/changelog/115687.yaml new file mode 100644 index 0000000000000..1180b4627c635 --- /dev/null +++ b/docs/changelog/115687.yaml @@ -0,0 +1,5 @@ +pr: 115687 +summary: Add default ILM policies and switch to ILM for apm-data plugin +area: Data streams +type: feature +issues: [] diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 0a3cf6726ea4a..28c9905386091 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -817,7 +817,26 @@ protected Set preserveILMPolicyIds() { ".fleet-file-tohost-meta-ilm-policy", ".deprecation-indexing-ilm-policy", ".monitoring-8-ilm-policy", - "behavioral_analytics-events-default_policy" + "behavioral_analytics-events-default_policy", + "logs-apm.app_logs-default_policy", + "logs-apm.error_logs-default_policy", + "metrics-apm.app_metrics-default_policy", + "metrics-apm.internal_metrics-default_policy", + "metrics-apm.service_destination_10m_metrics-default_policy", + "metrics-apm.service_destination_1m_metrics-default_policy", + "metrics-apm.service_destination_60m_metrics-default_policy", + "metrics-apm.service_summary_10m_metrics-default_policy", + "metrics-apm.service_summary_1m_metrics-default_policy", + "metrics-apm.service_summary_60m_metrics-default_policy", + "metrics-apm.service_transaction_10m_metrics-default_policy", + "metrics-apm.service_transaction_1m_metrics-default_policy", + "metrics-apm.service_transaction_60m_metrics-default_policy", + "metrics-apm.transaction_10m_metrics-default_policy", + "metrics-apm.transaction_1m_metrics-default_policy", + "metrics-apm.transaction_60m_metrics-default_policy", + "traces-apm.rum_traces-default_policy", + "traces-apm.sampled_traces-default_policy", + "traces-apm.traces-default_policy" ); } diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml index 627d6345d6b77..07b1bd9cbcd7e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.app-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: logs-apm.app_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml index a97c004fa1707..85d8452506493 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: logs-apm.error_logs-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml index 23130ef8400c2..9610b38923bbb 100644 
--- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.app-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.app_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml index 7fbf7941ea538..625db0ddf063d 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.internal-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.internal_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml index a7fe53f56474b..aff33171c4b58 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_destination_10m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml index 274c8c604582c..46f0e74d66d6c 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_destination_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml index 2d894dec48ac4..01b5057fb4124 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_destination.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_destination_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml index 612bf6ff7c1d0..9a2c8cc4e0f0b 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_summary_10m_metrics-default_policy - prefer_ilm: false diff --git 
a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml index e86eb803de63f..011380ea40c1f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_summary_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml index 4b4e14eb711e0..32b4840d26a4c 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_summary.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_summary_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml index fc03e62bcc4cd..80118df29877f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_transaction_10m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml index 9021506be3d33..673c17d972c5e 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_transaction_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml index 961b0a35543a7..a04870d4224ca 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.service_transaction.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.service_transaction_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml index e2504def2505c..abadcbf58bd62 100644 --- 
a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.10m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.transaction_10m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml index 7bfbcc7bb8052..b8af9a8b96f56 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.1m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.transaction_1m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml index 48e6ee5a09c20..3d13284934ade 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/metrics-apm.transaction.60m-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: metrics-apm.transaction_60m_metrics-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml index 360693e97ae2b..7fc2ca2343ea5 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: traces-apm.traces-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml index 6dfd79341424f..207307b396dc6 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.rum-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: traces-apm.rum_traces-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml index 2193dbf58488b..975e19693b656 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm.sampled-fallback@ilm.yaml @@ -8,4 +8,3 @@ template: index: lifecycle: name: traces-apm.sampled_traces-default_policy - prefer_ilm: false diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.app_logs-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.app_logs-default_policy.yaml new file mode 100644 index 0000000000000..ab73c1c357897 --- /dev/null +++ 
b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.app_logs-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 10d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.error_logs-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.error_logs-default_policy.yaml new file mode 100644 index 0000000000000..ab73c1c357897 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/logs-apm.error_logs-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 10d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.app_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.app_metrics-default_policy.yaml new file mode 100644 index 0000000000000..19fbd66e954cb --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.app_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.internal_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.internal_metrics-default_policy.yaml new file mode 100644 index 0000000000000..19fbd66e954cb --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.internal_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_10m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ 
b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_destination_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_10m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_summary_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_10m_metrics-default_policy.yaml 
b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.service_transaction_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_10m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_10m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..15c067d6720af --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_10m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 14d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 180d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_1m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_1m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..4f618ce4ff51b --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_1m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 7d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git 
a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_60m_metrics-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_60m_metrics-default_policy.yaml new file mode 100644 index 0000000000000..277ef59f11300 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/metrics-apm.transaction_60m_metrics-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 390d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.rum_traces-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.rum_traces-default_policy.yaml new file mode 100644 index 0000000000000..19fbd66e954cb --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.rum_traces-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 90d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.sampled_traces-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.sampled_traces-default_policy.yaml new file mode 100644 index 0000000000000..2c25f5ec568c6 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.sampled_traces-default_policy.yaml @@ -0,0 +1,13 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 1h + delete: + min_age: 1h + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.traces-default_policy.yaml b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.traces-default_policy.yaml new file mode 100644 index 0000000000000..ab73c1c357897 --- /dev/null +++ b/x-pack/plugin/apm-data/src/main/resources/lifecycle-policies/traces-apm.traces-default_policy.yaml @@ -0,0 +1,16 @@ +--- +_meta: + description: Default ILM policy for APM managed datastreams + managed: true +phases: + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + delete: + min_age: 10d + actions: + delete: {} diff --git a/x-pack/plugin/apm-data/src/main/resources/resources.yaml b/x-pack/plugin/apm-data/src/main/resources/resources.yaml index a178b768c4fe9..fa209cdec3695 100644 --- a/x-pack/plugin/apm-data/src/main/resources/resources.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/resources.yaml @@ -1,7 +1,7 @@ # "version" holds the version of the templates and ingest pipelines installed # by xpack-plugin apm-data. This must be increased whenever an existing template or # pipeline is changed, in order for it to be updated on Elasticsearch upgrade. -version: 10 +version: 11 component-templates: # Data lifecycle. 
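All of the policies above share the same shape: a `hot` phase with a rollover action (plus a priority for most data streams) followed by an eventual `delete` phase. As a rough sketch of how one of these YAML policies can be turned into a `LifecyclePolicy` object, here is hypothetical usage of the `parsePolicy` helper this patch adds to `LifecyclePolicyUtils` further below; the registry constant mirrors the `loadLifecyclePolicy` code later in the patch, and the import paths should be treated as assumptions.

[source,java]
----
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xpack.core.ilm.LifecyclePolicy;
import org.elasticsearch.xpack.core.ilm.LifecyclePolicyUtils;
import org.elasticsearch.xpack.core.template.LifecyclePolicyConfig;

import java.io.IOException;

// Sketch: parse a raw YAML policy with the parsePolicy helper introduced
// by this patch. The YAML mirrors the hot-rollover-plus-delete shape of
// the policies above; no variable substitution is applied.
class ParsePolicySketch {
    static LifecyclePolicy parse() throws IOException {
        String rawPolicy = """
            phases:
              hot:
                actions:
                  rollover:
                    max_age: 30d
                    max_primary_shard_size: 50gb
                  set_priority:
                    priority: 100
              delete:
                min_age: 10d
                actions:
                  delete: {}
            """;
        // parsePolicy both parses and validates the policy
        return LifecyclePolicyUtils.parsePolicy(
            rawPolicy,
            "logs-apm.app_logs-default_policy",
            LifecyclePolicyConfig.DEFAULT_X_CONTENT_REGISTRY,
            XContentType.YAML
        );
    }
}
----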
@@ -97,3 +97,24 @@ ingest-pipelines: - metrics-apm@pipeline: dependencies: - apm@pipeline + +lifecycle-policies: + - logs-apm.app_logs-default_policy + - logs-apm.error_logs-default_policy + - metrics-apm.app_metrics-default_policy + - metrics-apm.internal_metrics-default_policy + - metrics-apm.service_destination_10m_metrics-default_policy + - metrics-apm.service_destination_1m_metrics-default_policy + - metrics-apm.service_destination_60m_metrics-default_policy + - metrics-apm.service_summary_10m_metrics-default_policy + - metrics-apm.service_summary_1m_metrics-default_policy + - metrics-apm.service_summary_60m_metrics-default_policy + - metrics-apm.service_transaction_10m_metrics-default_policy + - metrics-apm.service_transaction_1m_metrics-default_policy + - metrics-apm.service_transaction_60m_metrics-default_policy + - metrics-apm.transaction_10m_metrics-default_policy + - metrics-apm.transaction_1m_metrics-default_policy + - metrics-apm.transaction_60m_metrics-default_policy + - traces-apm.rum_traces-default_policy + - traces-apm.sampled_traces-default_policy + - traces-apm.traces-default_policy diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index ff1debdea79b1..4a2b9265b3b05 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -44,6 +44,8 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; +import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.ilm.action.PutLifecycleRequest; import org.elasticsearch.xpack.core.template.IngestPipelineConfig; import org.elasticsearch.xpack.stack.StackTemplateRegistry; import org.elasticsearch.xpack.stack.StackTemplateRegistryAccessor; @@ -57,6 +59,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -133,6 +136,7 @@ public void testThatDisablingRegistryDoesNothing() throws Exception { assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), hasSize(0)); assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), hasSize(0)); assertThat(apmIndexTemplateRegistry.getIngestPipelines(), hasSize(0)); + assertThat(apmIndexTemplateRegistry.getLifecyclePolicies(), hasSize(0)); client.setVerifier((a, r, l) -> { fail("if the registry is disabled nothing should happen"); @@ -145,6 +149,7 @@ public void testThatDisablingRegistryDoesNothing() throws Exception { assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), not(hasSize(0))); assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), not(hasSize(0))); assertThat(apmIndexTemplateRegistry.getIngestPipelines(), not(hasSize(0))); + assertThat(apmIndexTemplateRegistry.getLifecyclePolicies(), not(hasSize(0))); } public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Exception { @@ -154,23 +159,26 @@ public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Ex AtomicInteger actualInstalledIndexTemplates 
= new AtomicInteger(0); AtomicInteger actualInstalledComponentTemplates = new AtomicInteger(0); AtomicInteger actualInstalledIngestPipelines = new AtomicInteger(0); + AtomicInteger actualILMPolicies = new AtomicInteger(0); client.setVerifier( (action, request, listener) -> verifyActions( actualInstalledIndexTemplates, actualInstalledComponentTemplates, actualInstalledIngestPipelines, + actualILMPolicies, action, request, listener ) ); - apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), nodes)); + apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), List.of(), Map.of(), nodes)); assertBusy(() -> assertThat(actualInstalledIngestPipelines.get(), equalTo(getIndependentPipelineConfigs().size()))); assertBusy(() -> assertThat(actualInstalledComponentTemplates.get(), equalTo(getIndependentComponentTemplateConfigs().size()))); + assertBusy(() -> assertThat(actualILMPolicies.get(), equalTo(getIndependentLifecyclePolicies().size()))); - // index templates should not be installed as they are dependent in component templates and ingest pipelines + // index templates should not be installed as they are dependent on component templates and ingest pipelines assertThat(actualInstalledIndexTemplates.get(), equalTo(0)); } @@ -201,6 +209,31 @@ public void testIngestPipelines() throws Exception { }); } + public void testILMLifecyclePolicies() throws Exception { + DiscoveryNode node = DiscoveryNodeUtils.create("node"); + DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + final List<LifecyclePolicy> lifecyclePolicies = apmIndexTemplateRegistry.getLifecyclePolicies(); + assertThat(lifecyclePolicies, is(not(empty()))); + + final Set<String> expectedILMPolicies = apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .map(LifecyclePolicy::getName) + .collect(Collectors.toSet()); + final Set<String> installedILMPolicies = ConcurrentHashMap.newKeySet(lifecyclePolicies.size()); + client.setVerifier((a, r, l) -> { + if (a == ILMActions.PUT && r instanceof PutLifecycleRequest putLifecycleRequest) { + if (expectedILMPolicies.contains(putLifecycleRequest.getPolicy().getName())) { + installedILMPolicies.add(putLifecycleRequest.getPolicy().getName()); + } + } + return AcknowledgedResponse.TRUE; + }); + + apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), List.of(), Map.of(), nodes)); + assertBusy(() -> { assertThat(installedILMPolicies, equalTo(expectedILMPolicies)); }); + } + public void testComponentTemplates() throws Exception { DiscoveryNode node = DiscoveryNodeUtils.create("node"); DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); @@ -208,12 +241,14 @@ public void testComponentTemplates() throws Exception { AtomicInteger actualInstalledIndexTemplates = new AtomicInteger(0); AtomicInteger actualInstalledComponentTemplates = new AtomicInteger(0); AtomicInteger actualInstalledIngestPipelines = new AtomicInteger(0); + AtomicInteger actualILMPolicies = new AtomicInteger(0); client.setVerifier( (action, request, listener) -> verifyActions( actualInstalledIndexTemplates, actualInstalledComponentTemplates, actualInstalledIngestPipelines, + actualILMPolicies, action, request, listener @@ -224,6 +259,9 @@ public void testComponentTemplates() throws Exception { Map.of(), Map.of(), apmIndexTemplateRegistry.getIngestPipelines().stream().map(IngestPipelineConfig::getId).collect(Collectors.toList()), +
apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())), nodes ) ); @@ -237,8 +275,10 @@ public void testComponentTemplates() throws Exception { // ingest pipelines should not have been installed as we used a cluster state that includes them already assertThat(actualInstalledIngestPipelines.get(), equalTo(0)); - // index templates should not be installed as they are dependent in component templates and ingest pipelines + // index templates should not be installed as they are dependent on component templates and ingest pipelines assertThat(actualInstalledIndexTemplates.get(), equalTo(0)); + // ilm policies should not have been installed as we used a cluster state that includes them already + assertThat(actualILMPolicies.get(), equalTo(0)); } public void testIndexTemplates() throws Exception { @@ -248,12 +288,14 @@ public void testIndexTemplates() throws Exception { AtomicInteger actualInstalledIndexTemplates = new AtomicInteger(0); AtomicInteger actualInstalledComponentTemplates = new AtomicInteger(0); AtomicInteger actualInstalledIngestPipelines = new AtomicInteger(0); + AtomicInteger actualILMPolicies = new AtomicInteger(0); client.setVerifier( (action, request, listener) -> verifyActions( actualInstalledIndexTemplates, actualInstalledComponentTemplates, actualInstalledIngestPipelines, + actualILMPolicies, action, request, listener @@ -272,6 +314,9 @@ public void testIndexTemplates() throws Exception { componentTemplates, Map.of(), apmIndexTemplateRegistry.getIngestPipelines().stream().map(IngestPipelineConfig::getId).collect(Collectors.toList()), + apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())), nodes ) ); @@ -280,9 +325,11 @@ public void testIndexTemplates() throws Exception { () -> assertThat(actualInstalledIndexTemplates.get(), equalTo(apmIndexTemplateRegistry.getComposableTemplateConfigs().size())) ); - // ingest pipelines and component templates should not have been installed as we used a cluster state that includes them already + // ingest pipelines, component templates, and lifecycle policies should not have been installed as we used a cluster state that + // includes them already assertThat(actualInstalledComponentTemplates.get(), equalTo(0)); assertThat(actualInstalledIngestPipelines.get(), equalTo(0)); + assertThat(actualILMPolicies.get(), equalTo(0)); } public void testIndexTemplateConventions() throws Exception { @@ -408,10 +455,18 @@ private List getIndependentPipelineConfigs() { .collect(Collectors.toList()); } + private Map getIndependentLifecyclePolicies() { + // All lifecycle policies are independent + return apmIndexTemplateRegistry.getLifecyclePolicies() + .stream() + .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())); + } + private ActionResponse verifyActions( AtomicInteger indexTemplatesCounter, AtomicInteger componentTemplatesCounter, AtomicInteger ingestPipelinesCounter, + AtomicInteger ilmPolicyCounter, ActionType action, ActionRequest request, ActionListener listener @@ -430,6 +485,9 @@ private ActionResponse verifyActions( } else if (action == PutPipelineTransportAction.TYPE) { ingestPipelinesCounter.incrementAndGet(); return AcknowledgedResponse.TRUE; + } else if (action == ILMActions.PUT) { + ilmPolicyCounter.incrementAndGet(); + return AcknowledgedResponse.TRUE; } else { fail("client called with unexpected request:" + request.toString()); return null; diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java index 4fb94dce1dcd0..8fe8c8835b98d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtils.java @@ -23,6 +23,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.template.resources.TemplateResources; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -48,19 +49,32 @@ public static LifecyclePolicy loadPolicy( source = replaceVariables(source, variables); validate(source); - try ( - XContentParser parser = XContentType.JSON.xContent() - .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry), source) - ) { - LifecyclePolicy policy = LifecyclePolicy.parse(parser, name); - policy.validate(); - return policy; - } + return parsePolicy(source, name, xContentRegistry, XContentType.JSON); } catch (Exception e) { throw new IllegalArgumentException("unable to load policy [" + name + "] from [" + resource + "]", e); } } + /** + * Parses a lifecycle policy based on the provided content type without doing any variable substitution. + * It is the caller's responsibility to do any variable substitution if required. + */ + public static LifecyclePolicy parsePolicy( + String rawPolicy, + String name, + NamedXContentRegistry xContentRegistry, + XContentType contentType + ) throws IOException { + try ( + XContentParser parser = contentType.xContent() + .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry), rawPolicy) + ) { + LifecyclePolicy policy = LifecyclePolicy.parse(parser, name); + policy.validate(); + return policy; + } + } + private static String replaceVariables(String template, Map<String, String> variables) { for (Map.Entry<String, String> variable : variables.entrySet()) { template = replaceVariable(template, variable.getKey(), variable.getValue()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java index c8ddd46c5912f..a30236b2fef28 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/YamlTemplateRegistry.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; @@ -22,7 +23,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.yaml.YamlXContent; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; +import org.elasticsearch.xpack.core.ilm.LifecyclePolicyUtils; import java.io.IOException; import java.util.Collections; @@ -48,6 +52,7 @@ public abstract class YamlTemplateRegistry extends IndexTemplateRegistry { private final Map<String, ComponentTemplate> componentTemplates; private final Map<String, ComposableIndexTemplate> composableIndexTemplates; private final List<IngestPipelineConfig>
ingestPipelines; + private final List lifecyclePolicies; private final FeatureService featureService; private volatile boolean enabled; @@ -84,6 +89,7 @@ public YamlTemplateRegistry( final List componentTemplateNames = (List) resources.get("component-templates"); final List indexTemplateNames = (List) resources.get("index-templates"); final List ingestPipelineConfigs = (List) resources.get("ingest-pipelines"); + final List lifecyclePolicyConfigs = (List) resources.get("lifecycle-policies"); componentTemplates = Optional.ofNullable(componentTemplateNames) .orElse(Collections.emptyList()) @@ -110,9 +116,16 @@ public YamlTemplateRegistry( ); }) .collect(Collectors.toList()); + lifecyclePolicies = Optional.ofNullable(lifecyclePolicyConfigs) + .orElse(Collections.emptyList()) + .stream() + .map(o -> (String) o) + .filter(templateFilter) + .map(this::loadLifecyclePolicy) + .collect(Collectors.toList()); this.featureService = featureService; } catch (IOException e) { - throw new RuntimeException(e); + throw new ElasticsearchException(e); } } @@ -178,6 +191,15 @@ public List getIngestPipelines() { } } + @Override + public List getLifecyclePolicies() { + if (enabled) { + return lifecyclePolicies; + } else { + return Collections.emptyList(); + } + } + protected abstract String getVersionProperty(); private ComponentTemplate loadComponentTemplate(String name, int version) { @@ -192,7 +214,7 @@ private ComponentTemplate loadComponentTemplate(String name, int version) { return ComponentTemplate.parse(parser); } } catch (Exception e) { - throw new RuntimeException("failed to load " + getName() + " Ingest plugin's component template: " + name, e); + throw new ElasticsearchException("failed to load " + getName() + " Ingest plugin's component template: " + name, e); } } @@ -208,7 +230,7 @@ private ComposableIndexTemplate loadIndexTemplate(String name, int version) { return ComposableIndexTemplate.parse(parser); } } catch (Exception e) { - throw new RuntimeException("failed to load " + getName() + " Ingest plugin's index template: " + name, e); + throw new ElasticsearchException("failed to load " + getName() + " Ingest plugin's index template: " + name, e); } } @@ -226,6 +248,17 @@ private IngestPipelineConfig loadIngestPipeline(String name, int version, @Nulla ); } + // IndexTemplateRegistry ensures that ILM lifecycle policies are not loaded + // when in DSL only mode. 
+ private LifecyclePolicy loadLifecyclePolicy(String name) { + try { + var rawPolicy = loadResource(this.getClass(), "/lifecycle-policies/" + name + ".yaml"); + return LifecyclePolicyUtils.parsePolicy(rawPolicy, name, LifecyclePolicyConfig.DEFAULT_X_CONTENT_REGISTRY, XContentType.YAML); + } catch (IOException e) { + throw new ElasticsearchException(e); + } + } + @Override protected boolean applyRolloverAfterTemplateV2Update() { return true; From bd091d3d96b33adb4121f980dc4f9f7a2a87b043 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 13 Nov 2024 14:15:58 +0000 Subject: [PATCH 84/95] Add a deprecation warning that the JSON format of non-detailed errors is changing in v9 (#116330) --- .../org/elasticsearch/rest/RestResponse.java | 13 ++++++++++ .../synonyms/PutSynonymRuleActionTests.java | 2 +- .../synonyms/PutSynonymsActionTests.java | 2 +- .../AbstractHttpServerTransportTests.java | 4 ++-- .../rest/BaseRestHandlerTests.java | 18 +++++++------- .../ChunkedRestResponseBodyPartTests.java | 2 +- .../rest/RestControllerTests.java | 24 +++++++++---------- .../rest/RestHttpResponseHeadersTests.java | 2 +- .../elasticsearch/rest/RestResponseTests.java | 18 ++++++++++++++ .../rest/action/RestBuilderListenerTests.java | 6 ++--- .../rest/action/cat/RestTasksActionTests.java | 2 +- .../action/document/RestBulkActionTests.java | 2 +- .../action/search/RestSearchActionTests.java | 2 +- .../scroll/RestClearScrollActionTests.java | 2 +- .../scroll/RestSearchScrollActionTests.java | 2 +- .../test/rest/RestActionTestCase.java | 2 +- .../EnterpriseSearchBaseRestHandlerTests.java | 2 +- .../action/SecurityBaseRestHandlerTests.java | 2 +- .../apikey/ApiKeyBaseRestHandlerTests.java | 2 +- .../apikey/RestCreateApiKeyActionTests.java | 2 +- ...stCreateCrossClusterApiKeyActionTests.java | 2 +- .../apikey/RestGetApiKeyActionTests.java | 6 ++--- .../RestInvalidateApiKeyActionTests.java | 4 ++-- .../apikey/RestQueryApiKeyActionTests.java | 8 +++---- ...stUpdateCrossClusterApiKeyActionTests.java | 2 +- .../oauth2/RestGetTokenActionTests.java | 6 ++--- .../action/user/RestQueryUserActionTests.java | 4 ++-- 27 files changed, 87 insertions(+), 56 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java index fd8b90a99e7f6..29cae343fb09e 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -16,6 +16,8 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -43,6 +45,7 @@ public final class RestResponse implements Releasable { static final String STATUS = "status"; private static final Logger SUPPRESSED_ERROR_LOGGER = LogManager.getLogger("rest.suppressed"); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(AbstractRestChannel.class); private final RestStatus status; @@ -142,6 +145,16 @@ public RestResponse(RestChannel channel, RestStatus status, Exception e) throws if (params.paramAsBoolean("error_trace", false) && status != RestStatus.UNAUTHORIZED) { params = new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), 
params); } + + if (channel.detailedErrorsEnabled() == false) { + deprecationLogger.warn( + DeprecationCategory.API, + "http_detailed_errors", + "The JSON format of non-detailed errors will change in Elasticsearch 9.0 to match the JSON structure" + + " used for detailed errors. To keep using the existing format, use the V8 REST API." + ); + } + try (XContentBuilder builder = channel.newErrorBuilder()) { build(builder, params, status, channel.detailedErrorsEnabled(), e); this.content = BytesReference.bytes(builder); diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java index 0cb4a56794c22..a1b9c59571496 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java @@ -26,7 +26,7 @@ public void testEmptyRequestBody() throws Exception { .withParams(Map.of("synonymsSet", "testSet", "synonymRuleId", "testRule")) .build(); - FakeRestChannel channel = new FakeRestChannel(request, false, 0); + FakeRestChannel channel = new FakeRestChannel(request, true, 0); try (var threadPool = createThreadPool()) { final var nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java index 54dff48788f52..4dce73fcf0e89 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java @@ -26,7 +26,7 @@ public void testEmptyRequestBody() throws Exception { .withParams(Map.of("synonymsSet", "test")) .build(); - FakeRestChannel channel = new FakeRestChannel(request, false, 0); + FakeRestChannel channel = new FakeRestChannel(request, true, 0); try (var threadPool = createThreadPool()) { final var nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index cf623e77f740a..19d92568e6528 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -271,7 +271,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th final RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); final RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel( fakeRequest, - false, + true, RestStatus.BAD_REQUEST ); @@ -361,7 +361,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th Map> restHeaders = new HashMap<>(); restHeaders.put(Task.TRACE_PARENT_HTTP_HEADER, Collections.singletonList(traceParentValue)); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + 
RestControllerTests.AssertingChannel channel = new RestControllerTests.AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); try ( AbstractHttpServerTransport transport = new AbstractHttpServerTransport( diff --git a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 9f82911ed121f..8a8bed9ca73db 100644 --- a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -73,7 +73,7 @@ public List<Route> routes() { params.put("consumed", randomAlphaOfLength(8)); params.put("unconsumed", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -108,7 +108,7 @@ public List<Route> routes() { params.put("unconsumed-first", randomAlphaOfLength(8)); params.put("unconsumed-second", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -155,7 +155,7 @@ public List<Route> routes() { params.put("very_close_to_parametre", randomAlphaOfLength(8)); params.put("very_far_from_every_consumed_parameter", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) @@ -206,7 +206,7 @@ public List<Route> routes() { params.put("consumed", randomAlphaOfLength(8)); params.put("response_param", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -238,7 +238,7 @@ public List<Route> routes() { params.put("human", null); params.put("error_trace", randomFrom("true", "false", null)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -283,7 +283,7 @@ public List<Route> routes() { params.put("size", randomAlphaOfLength(8)); params.put("time", randomAlphaOfLength(8)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); - RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + RestChannel channel = new FakeRestChannel(request, true, 1);
handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -314,7 +314,7 @@ public List<Route> routes() { new BytesArray(builder.toString()), XContentType.JSON ).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -341,7 +341,7 @@ public List<Route> routes() { }; final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final RestChannel channel = new FakeRestChannel(request, true, 1); handler.handleRequest(request, channel, mockClient); assertTrue(restChannelConsumer.executed); assertTrue(restChannelConsumer.closed); @@ -371,7 +371,7 @@ public List<Route> routes() { new BytesArray(builder.toString()), XContentType.JSON ).build(); - final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); + final RestChannel channel = new FakeRestChannel(request, true, 1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.handleRequest(request, channel, mockClient) diff --git a/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java b/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java index eece90ed94cf9..907c16aad5fdc 100644 --- a/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java +++ b/server/src/test/java/org/elasticsearch/rest/ChunkedRestResponseBodyPartTests.java @@ -56,7 +56,7 @@ public void testEncodesChunkedXContentCorrectly() throws IOException { ToXContent.EMPTY_PARAMS, new FakeRestChannel( new FakeRestRequest.Builder(xContentRegistry()).withContent(BytesArray.EMPTY, randomXContent.type()).build(), - randomBoolean(), + true, 1 ) ); diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 8f1904ce42438..afdad1045b4de 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -161,7 +161,7 @@ public void testApplyProductSpecificResponseHeaders() { final ThreadContext threadContext = client.threadPool().getThreadContext(); final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); // the rest controller relies on the caller to stash the context, so we should expect these values here as we didn't stash the // context in this test @@ -180,7 +180,7 @@ public void testRequestWithDisallowedMultiValuedHeader() { Map<String, List<String>> restHeaders = new HashMap<>(); restHeaders.put("header.1", Collections.singletonList("boo")); restHeaders.put("header.2", List.of("foo", "bar")); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); - AssertingChannel channel =
new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); assertTrue(channel.getSendResponseCalled()); } @@ -211,7 +211,7 @@ public String getName() { }); } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.OK); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy( eq(1L), @@ -235,7 +235,7 @@ public MethodHandlers next() { return null; } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -257,7 +257,7 @@ public MethodHandlers next() { } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -280,7 +280,7 @@ public String getName() { })); when(spyRestController.getAllHandlers(any(), eq(fakeRequest.rawPath()))).thenAnswer(x -> handlers.iterator()); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.METHOD_NOT_ALLOWED); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.METHOD_NOT_ALLOWED); spyRestController.dispatchRequest(fakeRequest, channel, threadContext); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 405))); } @@ -290,7 +290,7 @@ public void testDispatchBadRequestEmitsMetric() { final RestController restController = new RestController(null, null, circuitBreakerService, usageService, telemetryProvider); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchBadRequest(channel, threadContext, new Exception()); verify(requestsCounter).incrementBy(eq(1L), eq(Map.of(STATUS_CODE_KEY, 400))); } @@ -314,7 +314,7 @@ public MethodHandlers next() { return new MethodHandlers("/").addMethod(GET, RestApiVersion.current(), (request, channel, client) -> {}); } }); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); verify(tracer).startTrace( eq(threadContext), @@ -340,7 +340,7 @@ public void testRequestWithDisallowedMultiValuedHeaderButSameValues() { new RestResponse(RestStatus.OK, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY) ) ); - AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(fakeRequest, true, RestStatus.OK); restController.dispatchRequest(fakeRequest, channel, threadContext); assertTrue(channel.getSendResponseCalled()); } @@ -831,7 +831,7 @@ public void testFavicon() { final FakeRestRequest fakeRestRequest = new 
FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withMethod(GET) .withPath("/favicon.ico") .build(); - final AssertingChannel channel = new AssertingChannel(fakeRestRequest, false, RestStatus.OK); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext()); assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().contentType(), containsString("image/x-icon")); @@ -1115,7 +1115,7 @@ public void testApiProtectionWithServerlessDisabled() { List accessiblePaths = List.of("/public", "/internal", "/hidden"); accessiblePaths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); - AssertingChannel channel = new AssertingChannel(request, false, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); }); } @@ -1137,7 +1137,7 @@ public void testApiProtectionWithServerlessEnabledAsEndUser() { final Consumer> checkUnprotected = paths -> paths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); - AssertingChannel channel = new AssertingChannel(request, false, RestStatus.OK); + AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); }); final Consumer> checkProtected = paths -> paths.forEach(path -> { diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index 3b839896bc34f..4345f3c5e3fb4 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -97,7 +97,7 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { RestRequest restRequest = fakeRestRequestBuilder.build(); // Send the request and verify the response status code - FakeRestChannel restChannel = new FakeRestChannel(restRequest, false, 1); + FakeRestChannel restChannel = new FakeRestChannel(restRequest, true, 1); restController.dispatchRequest(restRequest, restChannel, new ThreadContext(Settings.EMPTY)); assertThat(restChannel.capturedResponse().status().getStatus(), is(405)); diff --git a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java index c65fd85307ece..cfed83f352951 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java @@ -93,6 +93,7 @@ public void testWithHeaders() throws Exception { assertThat(response.getHeaders().get("n1"), contains("v11", "v12")); assertThat(response.getHeaders().get("n2"), notNullValue()); assertThat(response.getHeaders().get("n2"), contains("v21", "v22")); + assertChannelWarnings(channel); } public void testEmptyChunkedBody() { @@ -117,6 +118,7 @@ public void testSimpleExceptionMessage() throws Exception { assertThat(text, not(containsString("FileNotFoundException"))); assertThat(text, not(containsString("/foo/bar"))); assertThat(text, not(containsString("error_trace"))); + assertChannelWarnings(channel); } public void testDetailedExceptionMessage() throws Exception { @@ -143,6 
+145,7 @@ public void testNonElasticsearchExceptionIsNotShownAsSimpleMessage() throws Exce assertThat(text, not(containsString("FileNotFoundException[/foo/bar]"))); assertThat(text, not(containsString("error_trace"))); assertThat(text, containsString("\"error\":\"No ElasticsearchException found\"")); + assertChannelWarnings(channel); } public void testErrorTrace() throws Exception { @@ -174,6 +177,7 @@ public void testAuthenticationFailedNoStackTrace() throws IOException { RestResponse response = new RestResponse(channel, authnException); assertThat(response.status(), is(RestStatus.UNAUTHORIZED)); assertThat(response.content().utf8ToString(), not(containsString(ElasticsearchException.STACK_TRACE))); + assertChannelWarnings(channel); } } } @@ -198,6 +202,7 @@ public void testStackTrace() throws IOException { } else { assertThat(response.content().utf8ToString(), not(containsString(ElasticsearchException.STACK_TRACE))); } + assertChannelWarnings(channel); } } } @@ -229,6 +234,7 @@ public void testNullThrowable() throws Exception { String text = response.content().utf8ToString(); assertThat(text, containsString("\"error\":\"unknown\"")); assertThat(text, not(containsString("error_trace"))); + assertChannelWarnings(channel); } public void testConvert() throws IOException { @@ -429,6 +435,7 @@ public void testErrorToAndFromXContent() throws IOException { assertEquals(expected.status(), parsedError.status()); assertDeepEquals(expected, parsedError); + assertChannelWarnings(channel); } public void testNoErrorFromXContent() throws IOException { @@ -495,6 +502,7 @@ public void testResponseContentTypeUponException() throws Exception { Exception t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); RestResponse response = new RestResponse(channel, t); assertThat(response.contentType(), equalTo(mediaType)); + assertChannelWarnings(channel); } public void testSupressedLogging() throws IOException { @@ -526,6 +534,7 @@ public void testSupressedLogging() throws IOException { "401", "unauthorized" ); + assertChannelWarnings(channel); } private void assertLogging( @@ -551,6 +560,15 @@ private void assertLogging( } } + private void assertChannelWarnings(RestChannel channel) { + if (channel.detailedErrorsEnabled() == false) { + assertWarnings( + "The JSON format of non-detailed errors will change in Elasticsearch 9.0" + + " to match the JSON structure used for detailed errors. To keep using the existing format, use the V8 REST API." 
+ ); + } + } + public static class WithHeadersException extends ElasticsearchException { WithHeadersException() { diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java index 827a07b89b2b8..03ae366050646 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestBuilderListenerTests.java @@ -26,7 +26,7 @@ public class RestBuilderListenerTests extends ESTestCase { public void testXContentBuilderClosedInBuildResponse() throws Exception { AtomicReference builderAtomicReference = new AtomicReference<>(); RestBuilderListener builderListener = new RestBuilderListener( - new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) + new FakeRestChannel(new FakeRestRequest(), true, 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { @@ -44,7 +44,7 @@ public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws E public void testXContentBuilderNotClosedInBuildResponseAssertionsDisabled() throws Exception { AtomicReference builderAtomicReference = new AtomicReference<>(); RestBuilderListener builderListener = new RestBuilderListener( - new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) + new FakeRestChannel(new FakeRestRequest(), true, 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { @@ -68,7 +68,7 @@ public void testXContentBuilderNotClosedInBuildResponseAssertionsEnabled() throw assumeTrue("tests are not being run with assertions", RestBuilderListener.class.desiredAssertionStatus()); RestBuilderListener builderListener = new RestBuilderListener( - new FakeRestChannel(new FakeRestRequest(), randomBoolean(), 1) + new FakeRestChannel(new FakeRestRequest(), true, 1) ) { @Override public RestResponse buildResponse(Empty empty, XContentBuilder builder) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java index 880a0bc9eabd7..8104ecfc31c3d 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java @@ -34,7 +34,7 @@ public void testConsumesParameters() throws Exception { FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams( Map.of("parent_task_id", "the node:3", "nodes", "node1,node2", "actions", "*") ).build(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, false, 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, 1); try (var threadPool = createThreadPool()) { final var nodeClient = buildNodeClient(threadPool); action.handleRequest(fakeRestRequest, fakeRestChannel, nodeClient); diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java index 3b6b280565da5..0d35e4311032d 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java @@ -222,7 +222,7 @@ public void next() { }) .withHeaders(Map.of("Content-Type", 
Collections.singletonList("application/json"))) .build(); - FakeRestChannel channel = new FakeRestChannel(request, false, 1); + FakeRestChannel channel = new FakeRestChannel(request, true, 1); RestBulkAction.ChunkHandler chunkHandler = new RestBulkAction.ChunkHandler( true, diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java index 24f59a8c3abe7..4822b1c64cf41 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/search/RestSearchActionTests.java @@ -51,7 +51,7 @@ public void testEnableFieldsEmulationNoErrors() throws Exception { .withParams(params) .build(); - action.handleRequest(request, new FakeRestChannel(request, false, 1), verifyingClient); + action.handleRequest(request, new FakeRestChannel(request, true, 1), verifyingClient); } public void testValidateSearchRequest() { diff --git a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java index 0c95340fdb6f7..33978b4cd6b9f 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java @@ -54,7 +54,7 @@ public void clearScroll(ClearScrollRequest request, ActionListener routes() { }; FakeRestRequest fakeRestRequest = new FakeRestRequest(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), isLicensed ? 0 : 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, isLicensed ? 0 : 1); try (var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java index 5d4ea0f30cb15..8509a6475aa71 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/SecurityBaseRestHandlerTests.java @@ -58,7 +58,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien } }; FakeRestRequest fakeRestRequest = new FakeRestRequest(); - FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), securityEnabled ? 0 : 1); + FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, securityEnabled ? 
0 : 1); try (var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java index 6ff05faf22d11..b734e602ec291 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/ApiKeyBaseRestHandlerTests.java @@ -56,7 +56,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien } }; final var fakeRestRequest = new FakeRestRequest(); - final var fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), requiredSettingsEnabled ? 0 : 1); + final var fakeRestChannel = new FakeRestChannel(fakeRestRequest, true, requiredSettingsEnabled ? 0 : 1); try (var threadPool = createThreadPool()) { final var client = new NoOpNodeClient(threadPool); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 9a05230d82ae6..79dba637d53d0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -75,7 +75,7 @@ public void testCreateApiKeyApi() throws Exception { ).withParams(Collections.singletonMap("refresh", randomFrom("false", "true", "wait_for"))).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java index 812354986d5bc..a47855731b37a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateCrossClusterApiKeyActionTests.java @@ -115,7 +115,7 @@ public void testLicenseEnforcement() throws Exception { } }"""), XContentType.JSON).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index d88a217cd0949..c65634a76b532 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -91,7 +91,7 @@ public void testGetApiKey() throws Exception { final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(params).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -159,7 +159,7 @@ public void testGetApiKeyWithProfileUid() throws Exception { } final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -224,7 +224,7 @@ public void testGetApiKeyOwnedByCurrentAuthenticatedUser() throws Exception { final FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index ac472378d4874..2cb1b6a66b02b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -77,7 +77,7 @@ public void testInvalidateApiKey() throws Exception { ).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -144,7 +144,7 @@ public void testInvalidateApiKeyOwnedByCurrentAuthenticatedUser() throws Excepti ).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java index d5aa249b1d0f5..7005b5158e626 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java @@ -110,7 +110,7 @@ public void testQueryParsing() throws Exception { ).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -184,7 +184,7 @@ public void testAggsAndAggregationsTogether() { XContentType.JSON ).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -230,7 +230,7 @@ public void testParsingSearchParameters() throws Exception { ).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -290,7 +290,7 @@ public void testQueryApiKeyWithProfileUid() throws Exception { } FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withParams(param).build(); SetOnce responseSetOnce = new SetOnce<>(); - RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java index ddeffc0675498..879e1ac8ad157 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestUpdateCrossClusterApiKeyActionTests.java @@ -97,7 +97,7 @@ public void testLicenseEnforcement() throws Exception { XContentType.JSON ).withParams(Map.of("id", randomAlphaOfLength(10))).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java index 2ac33a780313e..bd665560f425f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/oauth2/RestGetTokenActionTests.java @@ -43,7 +43,7 @@ public class RestGetTokenActionTests extends ESTestCase { public void testListenerHandlesExceptionProperly() { FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); final SetOnce responseSetOnce = new SetOnce<>(); - RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -67,7 +67,7 @@ public void sendResponse(RestResponse restResponse) { public void testSendResponse() { FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); final SetOnce responseSetOnce = new SetOnce<>(); - RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -114,7 +114,7 @@ public void sendResponse(RestResponse restResponse) { public void testSendResponseKerberosError() { FakeRestRequest restRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); final SetOnce responseSetOnce = new SetOnce<>(); - RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java index 4a593eeb24ac6..38405a2167808 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java @@ -73,7 +73,7 @@ public void testQueryParsing() throws Exception { ).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); @@ -132,7 +132,7 @@ public void testParsingSearchParameters() throws Exception { ).build(); final SetOnce responseSetOnce = new SetOnce<>(); - final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + final RestChannel restChannel = new AbstractRestChannel(restRequest, true) { @Override public void sendResponse(RestResponse restResponse) { responseSetOnce.set(restResponse); From 59602a9f995cc5a8a9a3d7c9a17c38fe4785bfef Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 13 Nov 2024 14:22:50 +0000 Subject: [PATCH 85/95] [ML] Pass inference timeout to start deployment (#116725) Default inference endpoints automatically deploy the model on inference. The inference timeout is now passed to start model deployment so users can control that timeout. --- .../elasticsearch/inference/InferenceService.java | 14 ++------------ .../mock/AbstractTestInferenceService.java | 3 ++-
.../action/TransportPutInferenceModelAction.java | 15 +++++++++++---- .../xpack/inference/services/SenderService.java | 7 +++++-- .../BaseElasticsearchInternalService.java | 14 ++++++-------- .../elasticsearch/ElasticDeployedModel.java | 3 ++- .../elasticsearch/ElasticsearchInternalModel.java | 4 +++- 7 files changed, 31 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index f7b688ba37963..c6e09f61befa0 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -139,9 +139,10 @@ void chunkedInfer( /** * Start or prepare the model for use. * @param model The model + * @param timeout Start timeout * @param listener The listener */ - void start(Model model, ActionListener listener); + void start(Model model, TimeValue timeout, ActionListener listener); /** * Stop the model deployment. @@ -153,17 +154,6 @@ default void stop(UnparsedModel unparsedModel, ActionListener listener) listener.onResponse(true); } - /** - * Put the model definition (if applicable) - * The main purpose of this function is to download ELSER - * The default action does nothing except acknowledge the request (true). - * @param modelVariant The configuration of the model variant to be downloaded - * @param listener The listener - */ - default void putModel(Model modelVariant, ActionListener listener) { - listener.onResponse(true); - } - /** * Optionally test the new model configuration in the inference service. * This function should be called when the model is first created, the diff --git a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java index 6496bcdd89f21..3be85ee857bbb 100644 --- a/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java +++ b/x-pack/plugin/inference/qa/test-service-plugin/src/main/java/org/elasticsearch/xpack/inference/mock/AbstractTestInferenceService.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; @@ -90,7 +91,7 @@ public Model parsePersistedConfig(String modelId, TaskType taskType, Map serviceSettingsMap); @Override - public void start(Model model, ActionListener listener) { + public void start(Model model, TimeValue timeout, ActionListener listener) { listener.onResponse(true); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java index 64eeed82ee1b9..2baee7f8afd66 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java @@ -22,6 +22,7 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.StrictDynamicMappingException; import org.elasticsearch.inference.InferenceService; import org.elasticsearch.inference.InferenceServiceRegistry; @@ -159,7 +160,7 @@ protected void masterOperation( return; } - parseAndStoreModel(service.get(), request.getInferenceEntityId(), resolvedTaskType, requestAsMap, listener); + parseAndStoreModel(service.get(), request.getInferenceEntityId(), resolvedTaskType, requestAsMap, request.ackTimeout(), listener); } private void parseAndStoreModel( @@ -167,12 +168,13 @@ private void parseAndStoreModel( String inferenceEntityId, TaskType taskType, Map config, + TimeValue timeout, ActionListener listener ) { ActionListener storeModelListener = listener.delegateFailureAndWrap( (delegate, verifiedModel) -> modelRegistry.storeModel( verifiedModel, - ActionListener.wrap(r -> startInferenceEndpoint(service, verifiedModel, delegate), e -> { + ActionListener.wrap(r -> startInferenceEndpoint(service, timeout, verifiedModel, delegate), e -> { if (e.getCause() instanceof StrictDynamicMappingException && e.getCause().getMessage().contains("chunking_settings")) { delegate.onFailure( new ElasticsearchStatusException( @@ -199,11 +201,16 @@ private void parseAndStoreModel( service.parseRequestConfig(inferenceEntityId, taskType, config, parsedModelListener); } - private void startInferenceEndpoint(InferenceService service, Model model, ActionListener listener) { + private void startInferenceEndpoint( + InferenceService service, + TimeValue timeout, + Model model, + ActionListener listener + ) { if (skipValidationAndStart) { listener.onResponse(new PutInferenceModelAction.Response(model.getConfigurations())); } else { - service.start(model, listener.map(started -> new PutInferenceModelAction.Response(model.getConfigurations()))); + service.start(model, timeout, listener.map(started -> new PutInferenceModelAction.Response(model.getConfigurations()))); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java index 953cf4cf6ad77..b8a99227cf517 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/SenderService.java @@ -104,13 +104,16 @@ protected abstract void doChunkedInfer( ActionListener> listener ); - @Override public void start(Model model, ActionListener listener) { init(); - doStart(model, listener); } + @Override + public void start(Model model, @Nullable TimeValue unused, ActionListener listener) { + start(model, listener); + } + protected void doStart(Model model, ActionListener listener) { listener.onResponse(true); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java index 5f97f3bad3dc8..922b366498c27 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/BaseElasticsearchInternalService.java @@ -83,7 +83,7 @@ public BaseElasticsearchInternalService( } @Override - public void start(Model model, ActionListener finalListener) { + public void start(Model model, TimeValue timeout, ActionListener finalListener) { if (model instanceof ElasticsearchInternalModel esModel) { if (supportedTaskTypes().contains(model.getTaskType()) == false) { finalListener.onFailure( @@ -107,7 +107,7 @@ public void start(Model model, ActionListener finalListener) { } }) .andThen((l2, modelDidPut) -> { - var startRequest = esModel.getStartTrainedModelDeploymentActionRequest(); + var startRequest = esModel.getStartTrainedModelDeploymentActionRequest(timeout); var responseListener = esModel.getCreateTrainedModelAssignmentActionListener(model, finalListener); client.execute(StartTrainedModelDeploymentAction.INSTANCE, startRequest, responseListener); }) @@ -149,8 +149,7 @@ protected static IllegalStateException notElasticsearchModelException(Model mode ); } - @Override - public void putModel(Model model, ActionListener listener) { + protected void putModel(Model model, ActionListener listener) { if (model instanceof ElasticsearchInternalModel == false) { listener.onFailure(notElasticsearchModelException(model)); return; @@ -303,10 +302,9 @@ protected void maybeStartDeployment( } if (isDefaultId(model.getInferenceEntityId()) && ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { - this.start( - model, - listener.delegateFailureAndWrap((l, started) -> { client.execute(InferModelAction.INSTANCE, request, listener); }) - ); + this.start(model, request.getInferenceTimeout(), listener.delegateFailureAndWrap((l, started) -> { + client.execute(InferModelAction.INSTANCE, request, listener); + })); } else { listener.onFailure(e); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java index 996ef6816025d..724c7a8f0a166 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticDeployedModel.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.inference.services.elasticsearch; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; @@ -31,7 +32,7 @@ public boolean usesExistingDeployment() { } @Override - public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { + public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest(TimeValue timeout) { throw new IllegalStateException("cannot start model that uses an existing deployment"); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java index 8b2969c39b7ba..2405243f302bc 100644 --- 
a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.inference.ChunkingSettings; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; @@ -67,11 +68,12 @@ public ElasticsearchInternalModel( this.internalServiceSettings = internalServiceSettings; } - public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest() { + public StartTrainedModelDeploymentAction.Request getStartTrainedModelDeploymentActionRequest(TimeValue timeout) { var startRequest = new StartTrainedModelDeploymentAction.Request(internalServiceSettings.modelId(), this.getInferenceEntityId()); startRequest.setNumberOfAllocations(internalServiceSettings.getNumAllocations()); startRequest.setThreadsPerAllocation(internalServiceSettings.getNumThreads()); startRequest.setAdaptiveAllocationsSettings(internalServiceSettings.getAdaptiveAllocationsSettings()); + startRequest.setTimeout(timeout); startRequest.setWaitForState(STARTED); return startRequest; From a9f2f33ccfc8b75391e01f5e389fcfa1f21337b4 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Wed, 13 Nov 2024 16:49:06 +0200 Subject: [PATCH 86/95] Adding patch version from 8.16 for skip_inner_hits_search_source (#116724) --- .../main/java/org/elasticsearch/TransportVersions.java | 1 + .../search/builder/SearchSourceBuilder.java | 9 ++++----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 3815d1bba18c3..661f057bfc5ff 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -176,6 +176,7 @@ static TransportVersion def(int id) { public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2); + public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16 = def(8_772_00_3); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 699c39a652f15..098a2b2f45d2f 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -292,7 +292,8 @@ public SearchSourceBuilder(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { rankBuilder = in.readOptionalNamedWriteable(RankBuilder.class); } - if 
(in.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { + if (in.getTransportVersion().isPatchFrom(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16) + || in.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { skipInnerHits = in.readBoolean(); } else { skipInnerHits = false; @@ -386,7 +387,8 @@ public void writeTo(StreamOutput out) throws IOException { } else if (rankBuilder != null) { throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion().toReleaseVersion() + "]"); } - if (out.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { + if (out.getTransportVersion().isPatchFrom(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16) + || out.getTransportVersion().onOrAfter(TransportVersions.SKIP_INNER_HITS_SEARCH_SOURCE)) { out.writeBoolean(skipInnerHits); } } @@ -1849,9 +1851,6 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t if (false == runtimeMappings.isEmpty()) { builder.field(RUNTIME_MAPPINGS_FIELD.getPreferredName(), runtimeMappings); } - if (skipInnerHits) { - builder.field("skipInnerHits", true); - } return builder; } From 7eb37bd10192a07eaeeb2e2ccf223ef9f49890f5 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 14 Nov 2024 02:01:59 +1100 Subject: [PATCH 87/95] Mute org.elasticsearch.xpack.inference.InferenceRestIT org.elasticsearch.xpack.inference.InferenceRestIT #116740 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index fa7ce1509574e..b860f4b6c4b5f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -239,6 +239,8 @@ tests: - class: org.elasticsearch.snapshots.SnapshotShutdownIT method: testRestartNodeDuringSnapshot issue: https://github.com/elastic/elasticsearch/issues/116730 +- class: org.elasticsearch.xpack.inference.InferenceRestIT + issue: https://github.com/elastic/elasticsearch/issues/116740 # Examples: # From b42dbab0a4b951a87803f6256b310f3b07d0318a Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Wed, 13 Nov 2024 09:48:30 -0600 Subject: [PATCH 88/95] Bump Netty to 4.1.115.Final (#116696) This commit bumps Netty from 4.1.109.Final to 4.1.115.Final --- build-tools-internal/version.properties | 2 +- gradle/verification-metadata.xml | 84 ++++++++++++------------- modules/transport-netty4/build.gradle | 11 +++- x-pack/plugin/inference/build.gradle | 11 +++- 4 files changed, 59 insertions(+), 49 deletions(-) diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index c3511dd5d256c..29c5bc16a8c4a 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -14,7 +14,7 @@ log4j = 2.19.0 slf4j = 2.0.6 ecsLogging = 1.2.0 jna = 5.12.1 -netty = 4.1.109.Final +netty = 4.1.115.Final commons_lang3 = 3.9 google_oauth_client = 1.34.1 diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 7c1e11f390f04..2b8f1b2a09ad9 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml [84 changed lines in hunks spanning lines 1366-1475 elided: the <artifact>/<sha256> checksum entries for the netty 4.1.109.Final jars are replaced with the corresponding entries for 4.1.115.Final; the XML tag contents were stripped during extraction and the checksum values are not recoverable] diff --git
a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 8dc718a818cec..13dfdf2b3c7bc 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -177,9 +177,8 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.nano.CodedOutputByteBufferNano', 'com.google.protobuf.nano.MessageNano', 'com.github.luben.zstd.Zstd', - 'com.github.luben.zstd.BaseZstdBufferDecompressingStreamNoFinalizer', - 'com.github.luben.zstd.ZstdBufferDecompressingStreamNoFinalizer', - 'com.github.luben.zstd.ZstdDirectBufferDecompressingStreamNoFinalizer', + 'com.github.luben.zstd.ZstdInputStreamNoFinalizer', + 'com.github.luben.zstd.util.Native', 'com.jcraft.jzlib.Deflater', 'com.jcraft.jzlib.Inflater', 'com.jcraft.jzlib.JZlib$WrapperType', @@ -231,8 +230,14 @@ tasks.named("thirdPartyAudit").configure { 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', diff --git a/x-pack/plugin/inference/build.gradle b/x-pack/plugin/inference/build.gradle index 15a2d0eb41368..29d5add35ff49 100644 --- a/x-pack/plugin/inference/build.gradle +++ b/x-pack/plugin/inference/build.gradle @@ -205,8 +205,14 @@ tasks.named("thirdPartyAudit").configure { 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.unpadded.MpscUnpaddedArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess' ) ignoreMissingClasses( @@ -320,10 +326,9 @@ tasks.named("thirdPartyAudit").configure { 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', - 'com.github.luben.zstd.BaseZstdBufferDecompressingStreamNoFinalizer', 
'com.github.luben.zstd.Zstd', - 'com.github.luben.zstd.ZstdBufferDecompressingStreamNoFinalizer', - 'com.github.luben.zstd.ZstdDirectBufferDecompressingStreamNoFinalizer', + 'com.github.luben.zstd.ZstdInputStreamNoFinalizer', + 'com.github.luben.zstd.util.Native', 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', 'com.google.protobuf.nano.CodedOutputByteBufferNano', 'com.google.protobuf.nano.MessageNano', From 5ec0a843e186cf7f2a8f1d6120e28f0db4662e38 Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Wed, 13 Nov 2024 15:59:20 +0000 Subject: [PATCH 89/95] Remove `ecs` option on `user_agent` processor (#116077) This removes the `ecs` option on the `user_agent` ingest processor, which is deprecated (way back in 6.7) and ignored. It will no longer be possible to create instances with this option, and the option will be removed from instances persisted in the cluster state on startup. The mechanism to do this upgrade on startup is designed to be reusable for other upgrades either to ingest processors or more generally to any custom metadata. It is inspired by the existing mechanism to upgrade index templates. --- docs/changelog/116077.yaml | 14 ++ .../src/main/java/module-info.java | 1 + .../useragent/IngestUserAgentPlugin.java | 14 ++ .../ingest/useragent/UserAgentProcessor.java | 25 ++- .../useragent/UserAgentProcessorTests.java | 14 ++ .../gateway/GatewayMetaState.java | 17 +- .../elasticsearch/ingest/IngestMetadata.java | 35 ++++ .../ingest/PipelineConfiguration.java | 33 +++ .../elasticsearch/node/NodeConstruction.java | 7 +- .../plugins/MetadataUpgrader.java | 36 +++- .../org/elasticsearch/plugins/Plugin.java | 17 ++ .../gateway/GatewayMetaStateTests.java | 103 +++++++++- .../ingest/IngestMetadataTests.java | 188 ++++++++++++++++-- 13 files changed, 466 insertions(+), 38 deletions(-) create mode 100644 docs/changelog/116077.yaml diff --git a/docs/changelog/116077.yaml b/docs/changelog/116077.yaml new file mode 100644 index 0000000000000..7c499c9b7acf4 --- /dev/null +++ b/docs/changelog/116077.yaml @@ -0,0 +1,14 @@ +pr: 116077 +summary: Remove `ecs` option on `user_agent` processor +area: Ingest Node +type: breaking +issues: [] +breaking: + title: Remove `ecs` option on `user_agent` processor + area: Ingest + details: >- + The `user_agent` ingest processor no longer accepts the `ecs` option. (It was previously deprecated and ignored.) + impact: >- + Users should stop using the `ecs` option when creating instances of the `user_agent` ingest processor. + The option will be removed from existing processors stored in the cluster state on upgrade. 
+ notable: false diff --git a/modules/ingest-user-agent/src/main/java/module-info.java b/modules/ingest-user-agent/src/main/java/module-info.java index e17dab83d5754..ef0af652f50b3 100644 --- a/modules/ingest-user-agent/src/main/java/module-info.java +++ b/modules/ingest-user-agent/src/main/java/module-info.java @@ -10,4 +10,5 @@ module org.elasticsearch.ingest.useragent { requires org.elasticsearch.server; requires org.elasticsearch.xcontent; + requires org.elasticsearch.base; } diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java index 6262c26cb752f..4d71417ec982c 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java @@ -9,7 +9,9 @@ package org.elasticsearch.ingest.useragent; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.Processor; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; @@ -23,6 +25,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.UnaryOperator; import java.util.stream.Stream; public class IngestUserAgentPlugin extends Plugin implements IngestPlugin { @@ -97,4 +100,15 @@ static Map createUserAgentParsers(Path userAgentConfigD public List> getSettings() { return List.of(CACHE_SIZE_SETTING); } + + @Override + public Map> getCustomMetadataUpgraders() { + return Map.of( + IngestMetadata.TYPE, + ingestMetadata -> ((IngestMetadata) ingestMetadata).maybeUpgradeProcessors( + UserAgentProcessor.TYPE, + UserAgentProcessor::maybeUpgradeConfig + ) + ); + } } diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java index 6224bb4d502d7..08ec00e0f04cf 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java @@ -9,9 +9,8 @@ package org.elasticsearch.ingest.useragent; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; @@ -32,8 +31,6 @@ public class UserAgentProcessor extends AbstractProcessor { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(UserAgentProcessor.class); - public static final String TYPE = "user_agent"; private final String field; @@ -198,21 +195,13 @@ public UserAgentProcessor create( String processorTag, String description, Map config - ) throws Exception { + ) { String field = readStringProperty(TYPE, processorTag, config, "field"); String targetField = readStringProperty(TYPE, processorTag, config, "target_field", "user_agent"); String regexFilename = readStringProperty(TYPE, processorTag, config, "regex_file", IngestUserAgentPlugin.DEFAULT_PARSER_NAME); List propertyNames = 
readOptionalList(TYPE, processorTag, config, "properties"); boolean extractDeviceType = readBooleanProperty(TYPE, processorTag, config, "extract_device_type", false); boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); - Object ecsValue = config.remove("ecs"); - if (ecsValue != null) { - deprecationLogger.warn( - DeprecationCategory.SETTINGS, - "ingest_useragent_ecs_settings", - "setting [ecs] is deprecated as ECS format is the default and only option" - ); - } UserAgentParser parser = userAgentParsers.get(regexFilename); if (parser == null) { @@ -272,4 +261,14 @@ public static Property parseProperty(String propertyName) { } } } + + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) + // This can be removed in V10. It's not possible to create an instance with the ecs property in V9, and all instances created by V8 or + // earlier will have been fixed when upgraded to V9. + static boolean maybeUpgradeConfig(Map config) { + // Instances created using ES 8.x (or earlier) may have the 'ecs' config entry. + // This was ignored in 8.x and is unsupported in 9.0. + // In 9.x, we should remove it from any existing processors on startup. + return config.remove("ecs") != null; + } } diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java index d9459404987df..471015d579012 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java @@ -331,4 +331,18 @@ public void testExtractDeviceTypeDisabled() { device.put("name", "Other"); assertThat(target.get("device"), is(device)); } + + public void testMaybeUpgradeConfig_removesEcsIfPresent() { + Map config = new HashMap<>(Map.of("field", "user-agent", "ecs", "whatever")); + boolean changed = UserAgentProcessor.maybeUpgradeConfig(config); + assertThat(changed, is(true)); + assertThat(config, is(Map.of("field", "user-agent"))); + } + + public void testMaybeUpgradeConfig_doesNothingIfEcsAbsent() { + Map config = new HashMap<>(Map.of("field", "user-agent")); + boolean changed = UserAgentProcessor.maybeUpgradeConfig(config); + assertThat(changed, is(false)); + assertThat(config, is(Map.of("field", "user-agent"))); + } } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index a7baca59e1857..bf2387453145d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -300,7 +300,7 @@ static Metadata upgradeMetadata(Metadata metadata, IndexMetadataVerifier indexMe upgradedMetadata.put(newMetadata, false); } // upgrade current templates - if (applyPluginUpgraders( + if (applyPluginTemplateUpgraders( metadata.getTemplates(), metadataUpgrader.indexTemplateMetadataUpgraders, upgradedMetadata::removeTemplate, @@ -308,10 +308,23 @@ static Metadata upgradeMetadata(Metadata metadata, IndexMetadataVerifier indexMe )) { changed = true; } + // upgrade custom metadata + for (Map.Entry> entry : metadataUpgrader.customMetadataUpgraders.entrySet()) { + String type = entry.getKey(); + Function upgrader = entry.getValue(); + Metadata.Custom original = metadata.custom(type); + if (original != null) { + 
Metadata.Custom upgraded = upgrader.apply(original); + if (upgraded.equals(original) == false) { + upgradedMetadata.putCustom(type, upgraded); + changed = true; + } + } + } return changed ? upgradedMetadata.build() : metadata; } - private static boolean applyPluginUpgraders( + private static boolean applyPluginTemplateUpgraders( Map existingData, UnaryOperator> upgrader, Consumer removeData, diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java index 316f621e80669..8654142016572 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java @@ -169,4 +169,39 @@ public boolean equals(Object o) { public int hashCode() { return pipelines.hashCode(); } + + /** + * Returns a copy of this object with processor upgrades applied, if necessary. Otherwise, returns this object. + * + *
+ * <p>
The given upgrader is applied to the config map for any processor of the given type. + */ + public IngestMetadata maybeUpgradeProcessors(String processorType, ProcessorConfigUpgrader processorConfigUpgrader) { + Map newPipelines = null; // as an optimization, we will lazily copy the map only if needed + for (Map.Entry entry : pipelines.entrySet()) { + String pipelineId = entry.getKey(); + PipelineConfiguration originalPipeline = entry.getValue(); + PipelineConfiguration upgradedPipeline = originalPipeline.maybeUpgradeProcessors(processorType, processorConfigUpgrader); + if (upgradedPipeline.equals(originalPipeline) == false) { + if (newPipelines == null) { + newPipelines = new HashMap<>(pipelines); + } + newPipelines.put(pipelineId, upgradedPipeline); + } + } + return newPipelines != null ? new IngestMetadata(newPipelines) : this; + } + + /** + * Functional interface for upgrading processor configs. An implementation of this will be associated with a specific processor type. + */ + public interface ProcessorConfigUpgrader { + + /** + * Upgrades the config for an individual processor of the appropriate type, if necessary. + * + * @param processorConfig The config to upgrade, which will be mutated if required + * @return Whether an upgrade was required + */ + boolean maybeUpgrade(Map processorConfig); + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index 7406ee8837264..9067cdb2040fd 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -24,6 +24,7 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Map; import java.util.Objects; @@ -156,4 +157,36 @@ public int hashCode() { result = 31 * result + getConfigAsMap().hashCode(); return result; } + + /** + * Returns a copy of this object with processor upgrades applied, if necessary. Otherwise, returns this object. + * + *
+ * <p>
The given upgrader is applied to the config map for any processor of the given type. + */ + PipelineConfiguration maybeUpgradeProcessors(String type, IngestMetadata.ProcessorConfigUpgrader upgrader) { + Map mutableConfigMap = getConfigAsMap(); + boolean changed = false; + // This should be a List of Maps, where the keys are processor types and the values are config maps. + // But we'll skip upgrading rather than fail if not. + if (mutableConfigMap.get(Pipeline.PROCESSORS_KEY) instanceof Iterable processors) { + for (Object processor : processors) { + if (processor instanceof Map processorMap && processorMap.get(type) instanceof Map targetProcessor) { + @SuppressWarnings("unchecked") // All XContent maps will be + Map processorConfigMap = (Map) targetProcessor; + if (upgrader.maybeUpgrade(processorConfigMap)) { + changed = true; + } + } + } + } + if (changed) { + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + return new PipelineConfiguration(id, BytesReference.bytes(builder.map(mutableConfigMap)), xContentType); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } else { + return this; + } + } } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index b424b417da82b..c883fca8d047f 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -47,6 +47,7 @@ import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; @@ -232,6 +233,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import java.util.function.UnaryOperator; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -970,7 +972,9 @@ private void construct( ); var indexTemplateMetadataUpgraders = pluginsService.map(Plugin::getIndexTemplateMetadataUpgrader).toList(); - modules.bindToInstance(MetadataUpgrader.class, new MetadataUpgrader(indexTemplateMetadataUpgraders)); + List>> customMetadataUpgraders = pluginsService.map(Plugin::getCustomMetadataUpgraders) + .toList(); + modules.bindToInstance(MetadataUpgrader.class, new MetadataUpgrader(indexTemplateMetadataUpgraders, customMetadataUpgraders)); final IndexMetadataVerifier indexMetadataVerifier = new IndexMetadataVerifier( settings, @@ -1463,6 +1467,7 @@ private CircuitBreakerService createCircuitBreakerService( /** * Wrap a group of reloadable plugins into a single reloadable plugin interface + * * @param reloadablePlugins A list of reloadable plugins * @return A single ReloadablePlugin that, upon reload, reloads the plugins it wraps */ diff --git a/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java b/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java index 6ad66f75304d7..3db2d136ce347 100644 --- a/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java +++ b/server/src/main/java/org/elasticsearch/plugins/MetadataUpgrader.java @@ -14,16 +14,26 @@ import java.util.Collection; import java.util.HashMap; +import 
java.util.List; import java.util.Map; import java.util.function.UnaryOperator; +import static java.util.stream.Collectors.collectingAndThen; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; + /** * Upgrades {@link Metadata} on startup on behalf of installed {@link Plugin}s */ public class MetadataUpgrader { public final UnaryOperator> indexTemplateMetadataUpgraders; + public final Map> customMetadataUpgraders; - public MetadataUpgrader(Collection>> indexTemplateMetadataUpgraders) { + public MetadataUpgrader( + Collection>> indexTemplateMetadataUpgraders, + Collection>> customMetadataUpgraders + ) { this.indexTemplateMetadataUpgraders = templates -> { Map upgradedTemplates = new HashMap<>(templates); for (UnaryOperator> upgrader : indexTemplateMetadataUpgraders) { @@ -31,5 +41,29 @@ public MetadataUpgrader(Collection map.entrySet().stream()) + .collect( + groupingBy( + // Group by the type of custom metadata to be upgraded (the entry key) + Map.Entry::getKey, + // For each type, extract the operators (the entry values), collect to a list, and make an operator which combines them + collectingAndThen(mapping(Map.Entry::getValue, toList()), CombiningCustomUpgrader::new) + ) + ); + } + + private record CombiningCustomUpgrader(List> upgraders) implements UnaryOperator { + + @Override + public Metadata.Custom apply(Metadata.Custom custom) { + Metadata.Custom upgraded = custom; + for (UnaryOperator upgrader : upgraders) { + upgraded = upgrader.apply(upgraded); + } + return upgraded; + } } + } diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index 725cd271e10f8..1ccb5331a45d7 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -248,6 +249,22 @@ public UnaryOperator> getIndexTemplateMetadat return UnaryOperator.identity(); } + /** + * Returns operators to modify custom metadata in the cluster state on startup. + * + *
+ * <p>
Each key of the map returned gives the type of custom to be modified. Each value is an operator to be applied to that custom + * metadata. The operator will be invoked with the result of calling {@link Metadata#custom(String)} with the map key as its argument, + * and should downcast the value accordingly. + * + *
+ * <p>
Plugins should return an empty map if no upgrade is required. + * + *
+ * <p>
The order of the upgrade calls is undefined and can change between runs. It is expected that plugins will modify only templates + * owned by them to avoid conflicts. + */ + public Map> getCustomMetadataUpgraders() { + return Map.of(); + } + /** * Provides the list of this plugin's custom thread pools, empty if * none. diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 7628ee8c954b4..a161794e35b91 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -31,8 +31,10 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.function.UnaryOperator; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -47,7 +49,7 @@ public void testUpdateTemplateMetadataOnUpgrade() { IndexTemplateMetadata.builder("added_test_template").patterns(randomIndexPatterns()).build() ); return templates; - })); + }), List.of()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertNotSame(upgrade, metadata); @@ -57,7 +59,7 @@ public void testUpdateTemplateMetadataOnUpgrade() { public void testNoMetadataUpgrade() { Metadata metadata = randomMetadata(new CustomMetadata1("data")); - MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList()); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList(), List.of()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertSame(upgrade, metadata); assertTrue(Metadata.isGlobalStateEquals(upgrade, metadata)); @@ -68,7 +70,7 @@ public void testNoMetadataUpgrade() { public void testCustomMetadataValidation() { Metadata metadata = randomMetadata(new CustomMetadata1("data")); - MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList()); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList(), List.of()); try { GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); } catch (IllegalStateException e) { @@ -78,7 +80,7 @@ public void testCustomMetadataValidation() { public void testIndexMetadataUpgrade() { Metadata metadata = randomMetadata(); - MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList()); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList(), List.of()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(true), metadataUpgrader); assertNotSame(upgrade, metadata); assertTrue(Metadata.isGlobalStateEquals(upgrade, metadata)); @@ -89,7 +91,7 @@ public void testIndexMetadataUpgrade() { public void testCustomMetadataNoChange() { Metadata metadata = randomMetadata(new CustomMetadata1("data")); - MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(HashMap::new)); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(HashMap::new), List.of()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertSame(upgrade, metadata); assertTrue(Metadata.isGlobalStateEquals(upgrade, metadata)); @@ -98,11 +100,74 
@@ public void testCustomMetadataNoChange() { } } + public void testCustomMetadata_appliesUpgraders() { + CustomMetadata2 custom2 = new CustomMetadata2("some data"); + // Test with a CustomMetadata1 and a CustomMetadata2... + Metadata originalMetadata = Metadata.builder() + .putCustom(CustomMetadata1.TYPE, new CustomMetadata1("data")) + .putCustom(CustomMetadata2.TYPE, custom2) + .build(); + // ...and two sets of upgraders which affect CustomMetadata1 and some other types... + Map> customUpgraders = Map.of( + CustomMetadata1.TYPE, + toUpgrade -> new CustomMetadata1("new " + ((CustomMetadata1) toUpgrade).getData()), + "not_" + CustomMetadata1.TYPE, + toUpgrade -> { + fail("This upgrader should not be invoked"); + return toUpgrade; + } + ); + Map> moreCustomUpgraders = Map.of("also_not_" + CustomMetadata1.TYPE, toUpgrade -> { + fail("This upgrader should not be invoked"); + return toUpgrade; + }); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(List.of(HashMap::new), List.of(customUpgraders, moreCustomUpgraders)); + Metadata upgradedMetadata = GatewayMetaState.upgradeMetadata( + originalMetadata, + new MockIndexMetadataVerifier(false), + metadataUpgrader + ); + // ...and assert that the CustomMetadata1 has been upgraded... + assertEquals(new CustomMetadata1("new data"), upgradedMetadata.custom(CustomMetadata1.TYPE)); + // ...but the CustomMetadata2 is untouched. + assertSame(custom2, upgradedMetadata.custom(CustomMetadata2.TYPE)); + } + + public void testCustomMetadata_appliesMultipleUpgraders() { + // Test with a CustomMetadata1 and a CustomMetadata2... + Metadata originalMetadata = Metadata.builder() + .putCustom(CustomMetadata1.TYPE, new CustomMetadata1("data")) + .putCustom(CustomMetadata2.TYPE, new CustomMetadata2("other data")) + .build(); + // ...and a set of upgraders which affects both of those... + Map> customUpgraders = Map.of( + CustomMetadata1.TYPE, + toUpgrade -> new CustomMetadata1("new " + ((CustomMetadata1) toUpgrade).getData()), + CustomMetadata2.TYPE, + toUpgrade -> new CustomMetadata2("new " + ((CustomMetadata2) toUpgrade).getData()) + ); + // ...and another set of upgraders which applies a second upgrade to CustomMetadata2... + Map> moreCustomUpgraders = Map.of( + CustomMetadata2.TYPE, + toUpgrade -> new CustomMetadata2("more " + ((CustomMetadata2) toUpgrade).getData()) + ); + MetadataUpgrader metadataUpgrader = new MetadataUpgrader(List.of(HashMap::new), List.of(customUpgraders, moreCustomUpgraders)); + Metadata upgradedMetadata = GatewayMetaState.upgradeMetadata( + originalMetadata, + new MockIndexMetadataVerifier(false), + metadataUpgrader + ); + // ...and assert that the first upgrader has been applied to the CustomMetadata1... + assertEquals(new CustomMetadata1("new data"), upgradedMetadata.custom(CustomMetadata1.TYPE)); + // ...and both upgraders have been applied to the CustomMetadata2. 
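+ // (In this test the two contributed upgraders for CustomMetadata2 compose, turning "other data" into
+ // "new other data" and then "more new other data".)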
+ assertEquals(new CustomMetadata2("more new other data"), upgradedMetadata.custom(CustomMetadata2.TYPE)); + } + public void testIndexTemplateValidation() { Metadata metadata = randomMetadata(); MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(customs -> { throw new IllegalStateException("template is incompatible"); - })); + }), List.of()); String message = expectThrows( IllegalStateException.class, () -> GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader) @@ -136,8 +201,7 @@ public void testMultipleIndexTemplateUpgrade() { .build() ); return indexTemplateMetadatas; - - })); + }), List.of()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertNotSame(upgrade, metadata); assertFalse(Metadata.isGlobalStateEquals(upgrade, metadata)); @@ -228,6 +292,29 @@ public EnumSet context() { } } + private static class CustomMetadata2 extends TestCustomMetadata { + public static final String TYPE = "custom_md_2"; + + CustomMetadata2(String data) { + super(data); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.current(); + } + + @Override + public EnumSet context() { + return EnumSet.of(Metadata.XContentContext.GATEWAY); + } + } + private static Metadata randomMetadata(TestCustomMetadata... customMetadatas) { Metadata.Builder builder = Metadata.builder(); for (TestCustomMetadata customMetadata : customMetadatas) { diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java index 6198d6580cb3d..b62fff2eceb28 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.ingest; import org.elasticsearch.cluster.DiffableUtils; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; @@ -53,18 +52,16 @@ public void testFromXContent() throws IOException { builder.endObject(); XContentBuilder shuffled = shuffleXContent(builder); try (XContentParser parser = createParser(shuffled)) { - Metadata.Custom custom = IngestMetadata.fromXContent(parser); - assertTrue(custom instanceof IngestMetadata); - IngestMetadata m = (IngestMetadata) custom; - assertEquals(2, m.getPipelines().size()); - assertEquals("1", m.getPipelines().get("1").getId()); - assertEquals("2", m.getPipelines().get("2").getId()); - assertEquals(pipeline.getConfigAsMap(), m.getPipelines().get("1").getConfigAsMap()); - assertEquals(pipeline2.getConfigAsMap(), m.getPipelines().get("2").getConfigAsMap()); + IngestMetadata custom = IngestMetadata.fromXContent(parser); + assertEquals(2, custom.getPipelines().size()); + assertEquals("1", custom.getPipelines().get("1").getId()); + assertEquals("2", custom.getPipelines().get("2").getId()); + assertEquals(pipeline.getConfigAsMap(), custom.getPipelines().get("1").getConfigAsMap()); + assertEquals(pipeline2.getConfigAsMap(), custom.getPipelines().get("2").getConfigAsMap()); } } - public void testDiff() throws Exception { + public void testDiff() { BytesReference pipelineConfig = new BytesArray("{}"); Map pipelines = new HashMap<>(); @@ -79,7 +76,7 @@ public void 
testDiff() throws Exception { IngestMetadata ingestMetadata2 = new IngestMetadata(pipelines); IngestMetadata.IngestMetadataDiff diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata2.diff(ingestMetadata1); - DiffableUtils.MapDiff pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; + DiffableUtils.MapDiff pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; assertThat(pipelinesDiff.getDeletes(), contains("2")); assertThat(Maps.ofEntries(pipelinesDiff.getUpserts()), allOf(aMapWithSize(2), hasKey("3"), hasKey("4"))); @@ -96,7 +93,7 @@ public void testDiff() throws Exception { IngestMetadata ingestMetadata3 = new IngestMetadata(pipelines); diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata3.diff(ingestMetadata1); - pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; + pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; assertThat(pipelinesDiff.getDeletes(), empty()); assertThat(pipelinesDiff.getUpserts(), empty()); @@ -112,7 +109,7 @@ public void testDiff() throws Exception { IngestMetadata ingestMetadata4 = new IngestMetadata(pipelines); diff = (IngestMetadata.IngestMetadataDiff) ingestMetadata4.diff(ingestMetadata1); - pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; + pipelinesDiff = (DiffableUtils.MapDiff) diff.pipelines; assertThat(Maps.ofEntries(pipelinesDiff.getDiffs()), allOf(aMapWithSize(1), hasKey("2"))); endResult = (IngestMetadata) diff.apply(ingestMetadata4); @@ -138,4 +135,169 @@ public void testChunkedToXContent() { response -> 2 + response.getPipelines().size() ); } + + public void testMaybeUpgradeProcessors_appliesUpgraderToSingleProcessor() { + String originalPipelineConfig = """ + { + "processors": [ + { + "foo": { + "fooNumber": 123 + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + IngestMetadata originalMetadata = new IngestMetadata( + Map.of("pipeline1", new PipelineConfiguration("pipeline1", new BytesArray(originalPipelineConfig), XContentType.JSON)) + ); + IngestMetadata upgradedMetadata = originalMetadata.maybeUpgradeProcessors( + "foo", + config -> config.putIfAbsent("fooString", "new") == null + ); + String expectedPipelineConfig = """ + { + "processors": [ + { + "foo": { + "fooNumber": 123, + "fooString": "new" + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + IngestMetadata expectedMetadata = new IngestMetadata( + Map.of("pipeline1", new PipelineConfiguration("pipeline1", new BytesArray(expectedPipelineConfig), XContentType.JSON)) + ); + assertEquals(expectedMetadata, upgradedMetadata); + } + + public void testMaybeUpgradeProcessors_returnsSameObjectIfNoUpgradeNeeded() { + String originalPipelineConfig = """ + { + "processors": [ + { + "foo": { + "fooNumber": 123, + "fooString": "old" + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + IngestMetadata originalMetadata = new IngestMetadata( + Map.of("pipeline1", new PipelineConfiguration("pipeline1", new BytesArray(originalPipelineConfig), XContentType.JSON)) + ); + IngestMetadata upgradedMetadata = originalMetadata.maybeUpgradeProcessors( + "foo", + config -> config.putIfAbsent("fooString", "new") == null + ); + assertSame(originalMetadata, upgradedMetadata); + } + + public void testMaybeUpgradeProcessors_appliesUpgraderToMultipleProcessorsInMultiplePipelines() { + String originalPipelineConfig1 = """ + { + "description": "A pipeline with a foo and a bar processor in different list items", + "processors": [ + { + "foo": { + "fooNumber": 123 + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + String 
originalPipelineConfig2 = """ + { + "description": "A pipeline with a foo and a qux processor in the same list item", + "processors": [ + { + "foo": { + "fooNumber": 321 + }, + "qux": { + "quxNumber": 654 + } + } + ] + } + """; + IngestMetadata originalMetadata = new IngestMetadata( + Map.of( + "pipeline1", + new PipelineConfiguration("pipeline1", new BytesArray(originalPipelineConfig1), XContentType.JSON), + "pipeline2", + new PipelineConfiguration("pipeline2", new BytesArray(originalPipelineConfig2), XContentType.JSON) + ) + ); + IngestMetadata upgradedMetadata = originalMetadata.maybeUpgradeProcessors( + "foo", + config -> config.putIfAbsent("fooString", "new") == null + ); + String expectedPipelineConfig1 = """ + { + "description": "A pipeline with a foo and a bar processor in different list items", + "processors": [ + { + "foo": { + "fooNumber": 123, + "fooString": "new" + } + }, + { + "bar": { + "barNumber": 456 + } + } + ] + } + """; + String expectedPipelineConfig2 = """ + { + "description": "A pipeline with a foo and a qux processor in the same list item", + "processors": [ + { + "foo": { + "fooNumber": 321, + "fooString": "new" + }, + "qux": { + "quxNumber": 654 + } + } + ] + } + """; + IngestMetadata expectedMetadata = new IngestMetadata( + Map.of( + "pipeline1", + new PipelineConfiguration("pipeline1", new BytesArray(expectedPipelineConfig1), XContentType.JSON), + "pipeline2", + new PipelineConfiguration("pipeline2", new BytesArray(expectedPipelineConfig2), XContentType.JSON) + ) + ); + assertEquals(expectedMetadata, upgradedMetadata); + } } From 1b03a96e52d1c5b5bcdffe7ab56f4b24276f65fe Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Wed, 13 Nov 2024 11:05:05 -0500 Subject: [PATCH 90/95] Add tracking for query rule types (#116357) * Add total rule type counts to list calls and xpack usage * Add feature * Update docs/changelog/116357.yaml * Fix docs test failure & update yaml tests * remove additional spaces --------- Co-authored-by: Mark J. 
Hoy --- docs/changelog/116357.yaml | 5 + .../apis/list-query-rulesets.asciidoc | 12 ++- .../org/elasticsearch/TransportVersions.java | 2 + .../EnterpriseSearchFeatureSetUsage.java | 1 + .../entsearch/rules/20_query_ruleset_list.yml | 91 ++++++++++++++++++- .../application/EnterpriseSearchFeatures.java | 7 +- .../EnterpriseSearchUsageTransportAction.java | 27 ++++-- .../rules/QueryRulesIndexService.java | 6 +- .../rules/QueryRulesetListItem.java | 32 ++++++- .../rules/action/ListQueryRulesetsAction.java | 3 + ...setsActionResponseBWCSerializingTests.java | 22 ++++- 11 files changed, 184 insertions(+), 24 deletions(-) create mode 100644 docs/changelog/116357.yaml diff --git a/docs/changelog/116357.yaml b/docs/changelog/116357.yaml new file mode 100644 index 0000000000000..a1a7831eab9ca --- /dev/null +++ b/docs/changelog/116357.yaml @@ -0,0 +1,5 @@ +pr: 116357 +summary: Add tracking for query rule types +area: Relevance +type: enhancement +issues: [] diff --git a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc index 6832934f6985c..304b8c7745007 100644 --- a/docs/reference/query-rules/apis/list-query-rulesets.asciidoc +++ b/docs/reference/query-rules/apis/list-query-rulesets.asciidoc @@ -124,7 +124,7 @@ PUT _query_rules/ruleset-3 }, { "rule_id": "rule-3", - "type": "pinned", + "type": "exclude", "criteria": [ { "type": "fuzzy", @@ -178,6 +178,9 @@ A sample response: "rule_total_count": 1, "rule_criteria_types_counts": { "exact": 1 + }, + "rule_type_counts": { + "pinned": 1 } }, { @@ -186,6 +189,9 @@ A sample response: "rule_criteria_types_counts": { "exact": 1, "fuzzy": 1 + }, + "rule_type_counts": { + "pinned": 2 } }, { @@ -194,6 +200,10 @@ A sample response: "rule_criteria_types_counts": { "exact": 1, "fuzzy": 2 + }, + "rule_type_counts": { + "pinned": 2, + "exclude": 1 } } ] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 661f057bfc5ff..b7da6115a1a48 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -196,6 +196,8 @@ static TransportVersion def(int id) { public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO = def(8_789_00_0); public static final TransportVersion VERTEX_AI_INPUT_TYPE_ADDED = def(8_790_00_0); public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE = def(8_791_00_0); + public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES = def(8_792_00_0); + /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java index b1dac4898945d..a054a18221e9b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java @@ -34,6 +34,7 @@ public class EnterpriseSearchFeatureSetUsage extends XPackFeatureUsage { public static final String MIN_RULE_COUNT = "min_rule_count"; public static final String MAX_RULE_COUNT = "max_rule_count"; public static final String RULE_CRITERIA_TOTAL_COUNTS = "rule_criteria_total_counts"; + public static final String RULE_TYPE_TOTAL_COUNTS = "rule_type_total_counts"; private final Map searchApplicationsUsage; private final Map analyticsCollectionsUsage; diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml index 172d38cce5384..0b98182b39602 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/rules/20_query_ruleset_list.yml @@ -1,7 +1,4 @@ setup: - - requires: - cluster_features: [ "gte_v8.10.0" ] - reason: Introduced in 8.10.0 - do: query_rules.put_ruleset: ruleset_id: test-query-ruleset-3 @@ -222,7 +219,7 @@ teardown: body: rules: - rule_id: query-rule-id1 - type: pinned + type: exclude criteria: - type: exact metadata: query_string @@ -307,3 +304,89 @@ teardown: - match: { error.type: 'security_exception' } +--- +'List query rulesets - include rule types': + - requires: + cluster_features: [ "query_rule_list_types" ] + reason: 'List responses updated in 8.15.5 and 8.16.1' + + - do: + query_rules.put_ruleset: + ruleset_id: a-test-query-ruleset-with-lots-of-criteria + body: + rules: + - rule_id: query-rule-id1 + type: exclude + criteria: + - type: exact + metadata: query_string + values: [ puggles ] + - type: gt + metadata: year + values: [ 2023 ] + actions: + ids: + - 'id1' + - 'id2' + - rule_id: query-rule-id2 + type: pinned + criteria: + - type: exact + metadata: query_string + values: [ pug ] + actions: + ids: + - 'id3' + - 'id4' + - rule_id: query-rule-id3 + type: pinned + criteria: + - type: fuzzy + metadata: query_string + values: [ puggles ] + actions: + ids: + - 'id5' + - 'id6' + - rule_id: query-rule-id4 + type: pinned + criteria: + - type: always + actions: + ids: + - 'id7' + - 'id8' + - rule_id: query-rule-id5 + type: pinned + criteria: + - type: prefix + metadata: query_string + values: [ pug ] + - type: suffix + metadata: query_string + values: [ gle ] + actions: + ids: + - 'id9' + - 'id10' + + - do: + query_rules.list_rulesets: + from: 0 + size: 1 + + - match: { count: 4 } + + # Alphabetical order by ruleset_id for results + - match: { results.0.ruleset_id: "a-test-query-ruleset-with-lots-of-criteria" } + - match: { results.0.rule_total_count: 5 } + - match: + results.0.rule_criteria_types_counts: + exact: 2 + gt: 1 + fuzzy: 1 + prefix: 1 + suffix: 1 + always: 1 + - match: { 
results.0.rule_type_counts: { pinned: 4, exclude: 1 } } + diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java index ae8e63bdb6420..86882a28ec39f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java @@ -12,6 +12,7 @@ import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; +import org.elasticsearch.xpack.application.rules.action.ListQueryRulesetsAction; import org.elasticsearch.xpack.application.rules.retriever.QueryRuleRetrieverBuilder; import java.util.Map; @@ -23,7 +24,11 @@ public class EnterpriseSearchFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(QUERY_RULES_TEST_API, QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED); + return Set.of( + QUERY_RULES_TEST_API, + QueryRuleRetrieverBuilder.QUERY_RULE_RETRIEVERS_SUPPORTED, + ListQueryRulesetsAction.QUERY_RULE_LIST_TYPES + ); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java index c079892ccb2b6..7683ea7cb28a7 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchUsageTransportAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.application.analytics.action.GetAnalyticsCollectionAction; -import org.elasticsearch.xpack.application.rules.QueryRuleCriteriaType; import org.elasticsearch.xpack.application.rules.QueryRulesIndexService; import org.elasticsearch.xpack.application.rules.QueryRulesetListItem; import org.elasticsearch.xpack.application.rules.action.ListQueryRulesetsAction; @@ -41,7 +40,6 @@ import org.elasticsearch.xpack.core.application.EnterpriseSearchFeatureSetUsage; import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; import java.util.IntSummaryStatistics; import java.util.List; @@ -226,20 +224,29 @@ private void addQueryRulesetUsage(ListQueryRulesetsAction.Response response, Map List results = response.queryPage().results(); IntSummaryStatistics ruleStats = results.stream().mapToInt(QueryRulesetListItem::ruleTotalCount).summaryStatistics(); - Map criteriaTypeCountMap = new EnumMap<>(QueryRuleCriteriaType.class); - results.stream() - .flatMap(result -> result.criteriaTypeToCountMap().entrySet().stream()) - .forEach(entry -> criteriaTypeCountMap.merge(entry.getKey(), entry.getValue(), Integer::sum)); + Map ruleCriteriaTypeCountMap = new HashMap<>(); + Map ruleTypeCountMap = new HashMap<>(); - Map rulesTypeCountMap = new HashMap<>(); - criteriaTypeCountMap.forEach((criteriaType, count) -> rulesTypeCountMap.put(criteriaType.name().toLowerCase(Locale.ROOT), count)); + results.forEach(result -> { + populateCounts(ruleCriteriaTypeCountMap, result.criteriaTypeToCountMap()); + 
populateCounts(ruleTypeCountMap, result.ruleTypeToCountMap()); + }); queryRulesUsage.put(TOTAL_COUNT, response.queryPage().count()); queryRulesUsage.put(TOTAL_RULE_COUNT, ruleStats.getSum()); queryRulesUsage.put(MIN_RULE_COUNT, results.isEmpty() ? 0 : ruleStats.getMin()); queryRulesUsage.put(MAX_RULE_COUNT, results.isEmpty() ? 0 : ruleStats.getMax()); - if (rulesTypeCountMap.isEmpty() == false) { - queryRulesUsage.put(RULE_CRITERIA_TOTAL_COUNTS, rulesTypeCountMap); + if (ruleCriteriaTypeCountMap.isEmpty() == false) { + queryRulesUsage.put(RULE_CRITERIA_TOTAL_COUNTS, ruleCriteriaTypeCountMap); + } + if (ruleTypeCountMap.isEmpty() == false) { + queryRulesUsage.put(EnterpriseSearchFeatureSetUsage.RULE_TYPE_TOTAL_COUNTS, ruleTypeCountMap); } } + + private void populateCounts(Map targetMap, Map, Integer> sourceMap) { + sourceMap.forEach( + (key, value) -> targetMap.merge(key.name().toLowerCase(Locale.ROOT), value, (v1, v2) -> (Integer) v1 + (Integer) v2) + ); + } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java index 3ce51ae5d832d..9b264a2cc41cf 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesIndexService.java @@ -445,6 +445,7 @@ private static QueryRulesetListItem hitToQueryRulesetListItem(SearchHit searchHi final List> rules = ((List>) sourceMap.get(QueryRuleset.RULES_FIELD.getPreferredName())); final int numRules = rules.size(); final Map queryRuleCriteriaTypeToCountMap = new EnumMap<>(QueryRuleCriteriaType.class); + final Map ruleTypeToCountMap = new EnumMap<>(QueryRule.QueryRuleType.class); for (LinkedHashMap rule : rules) { @SuppressWarnings("unchecked") List> criteriaList = ((List>) rule.get(QueryRule.CRITERIA_FIELD.getPreferredName())); @@ -453,9 +454,12 @@ private static QueryRulesetListItem hitToQueryRulesetListItem(SearchHit searchHi final QueryRuleCriteriaType queryRuleCriteriaType = QueryRuleCriteriaType.type(criteriaType); queryRuleCriteriaTypeToCountMap.compute(queryRuleCriteriaType, (k, v) -> v == null ? 1 : v + 1); } + final String ruleType = ((String) rule.get(QueryRule.TYPE_FIELD.getPreferredName())); + final QueryRule.QueryRuleType queryRuleType = QueryRule.QueryRuleType.queryRuleType(ruleType); + ruleTypeToCountMap.compute(queryRuleType, (k, v) -> v == null ? 
1 : v + 1); } - return new QueryRulesetListItem(rulesetId, numRules, queryRuleCriteriaTypeToCountMap); + return new QueryRulesetListItem(rulesetId, numRules, queryRuleCriteriaTypeToCountMap, ruleTypeToCountMap); } public record QueryRulesetResult(List rulesets, long totalResults) {} diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index f3bc07387512f..a5e2d3f79da0e 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -32,10 +32,12 @@ public class QueryRulesetListItem implements Writeable, ToXContentObject { public static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id"); public static final ParseField RULE_TOTAL_COUNT_FIELD = new ParseField("rule_total_count"); public static final ParseField RULE_CRITERIA_TYPE_COUNTS_FIELD = new ParseField("rule_criteria_types_counts"); + public static final ParseField RULE_TYPE_COUNTS_FIELD = new ParseField("rule_type_counts"); private final String rulesetId; private final int ruleTotalCount; private final Map criteriaTypeToCountMap; + private final Map ruleTypeToCountMap; /** * Constructs a QueryRulesetListItem. @@ -44,11 +46,17 @@ public class QueryRulesetListItem implements Writeable, ToXContentObject { * @param ruleTotalCount The number of rules contained within the ruleset. * @param criteriaTypeToCountMap A map of criteria type to the number of rules of that type. */ - public QueryRulesetListItem(String rulesetId, int ruleTotalCount, Map criteriaTypeToCountMap) { + public QueryRulesetListItem( + String rulesetId, + int ruleTotalCount, + Map criteriaTypeToCountMap, + Map ruleTypeToCountMap + ) { Objects.requireNonNull(rulesetId, "rulesetId cannot be null on a QueryRuleListItem"); this.rulesetId = rulesetId; this.ruleTotalCount = ruleTotalCount; this.criteriaTypeToCountMap = criteriaTypeToCountMap; + this.ruleTypeToCountMap = ruleTypeToCountMap; } public QueryRulesetListItem(StreamInput in) throws IOException { @@ -59,6 +67,11 @@ public QueryRulesetListItem(StreamInput in) throws IOException { } else { this.criteriaTypeToCountMap = Map.of(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { + this.ruleTypeToCountMap = in.readMap(m -> in.readEnum(QueryRule.QueryRuleType.class), StreamInput::readInt); + } else { + this.ruleTypeToCountMap = Map.of(); + } } @Override @@ -71,6 +84,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(criteriaType.name().toLowerCase(Locale.ROOT), criteriaTypeToCountMap.get(criteriaType)); } builder.endObject(); + builder.startObject(RULE_TYPE_COUNTS_FIELD.getPreferredName()); + for (QueryRule.QueryRuleType ruleType : ruleTypeToCountMap.keySet()) { + builder.field(ruleType.name().toLowerCase(Locale.ROOT), ruleTypeToCountMap.get(ruleType)); + } + builder.endObject(); builder.endObject(); return builder; } @@ -82,6 +100,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { out.writeMap(criteriaTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt); } + if (out.getTransportVersion().onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { + 
out.writeMap(ruleTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt); + } } /** @@ -106,6 +127,10 @@ public Map criteriaTypeToCountMap() { return criteriaTypeToCountMap; } + public Map ruleTypeToCountMap() { + return ruleTypeToCountMap; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -113,11 +138,12 @@ public boolean equals(Object o) { QueryRulesetListItem that = (QueryRulesetListItem) o; return ruleTotalCount == that.ruleTotalCount && Objects.equals(rulesetId, that.rulesetId) - && Objects.equals(criteriaTypeToCountMap, that.criteriaTypeToCountMap); + && Objects.equals(criteriaTypeToCountMap, that.criteriaTypeToCountMap) + && Objects.equals(ruleTypeToCountMap, that.ruleTypeToCountMap); } @Override public int hashCode() { - return Objects.hash(rulesetId, ruleTotalCount, criteriaTypeToCountMap); + return Objects.hash(rulesetId, ruleTotalCount, criteriaTypeToCountMap, ruleTypeToCountMap); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java index 11397583ce5b9..62f9f3fd46cc4 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -33,6 +34,8 @@ public class ListQueryRulesetsAction { public static final String NAME = "cluster:admin/xpack/query_rules/list"; public static final ActionType INSTANCE = new ActionType<>(NAME); + public static final NodeFeature QUERY_RULE_LIST_TYPES = new NodeFeature("query_rule_list_types"); + private ListQueryRulesetsAction() {/* no instances */} public static class Request extends ActionRequest implements ToXContentObject { diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java index 5ae0f51cb6112..27ac214558f89 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java @@ -8,8 +8,10 @@ package org.elasticsearch.xpack.application.rules.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.application.EnterpriseSearchModuleTestUtils; +import org.elasticsearch.xpack.application.rules.QueryRule; import org.elasticsearch.xpack.application.rules.QueryRuleCriteriaType; import org.elasticsearch.xpack.application.rules.QueryRuleset; import org.elasticsearch.xpack.application.rules.QueryRulesetListItem; @@ -32,9 +34,13 @@ private static 
ListQueryRulesetsAction.Response randomQueryRulesetListItem() { QueryRuleset queryRuleset = EnterpriseSearchModuleTestUtils.randomQueryRuleset(); Map criteriaTypeToCountMap = Map.of( randomFrom(QueryRuleCriteriaType.values()), - randomIntBetween(0, 10) + randomIntBetween(1, 10) ); - return new QueryRulesetListItem(queryRuleset.id(), queryRuleset.rules().size(), criteriaTypeToCountMap); + Map ruleTypeToCountMap = Map.of( + randomFrom(QueryRule.QueryRuleType.values()), + randomIntBetween(1, 10) + ); + return new QueryRulesetListItem(queryRuleset.id(), queryRuleset.rules().size(), criteriaTypeToCountMap, ruleTypeToCountMap); }), randomLongBetween(0, 1000)); } @@ -53,12 +59,20 @@ protected ListQueryRulesetsAction.Response mutateInstanceForVersion( ListQueryRulesetsAction.Response instance, TransportVersion version ) { - if (version.onOrAfter(QueryRulesetListItem.EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { + if (version.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) { return instance; + } else if (version.onOrAfter(QueryRulesetListItem.EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) { + List updatedResults = new ArrayList<>(); + for (QueryRulesetListItem listItem : instance.queryPage.results()) { + updatedResults.add( + new QueryRulesetListItem(listItem.rulesetId(), listItem.ruleTotalCount(), listItem.criteriaTypeToCountMap(), Map.of()) + ); + } + return new ListQueryRulesetsAction.Response(updatedResults, instance.queryPage.count()); } else { List updatedResults = new ArrayList<>(); for (QueryRulesetListItem listItem : instance.queryPage.results()) { - updatedResults.add(new QueryRulesetListItem(listItem.rulesetId(), listItem.ruleTotalCount(), Map.of())); + updatedResults.add(new QueryRulesetListItem(listItem.rulesetId(), listItem.ruleTotalCount(), Map.of(), Map.of())); } return new ListQueryRulesetsAction.Response(updatedResults, instance.queryPage.count()); } From cdd77c65cb7950c1c428ebc2ad144abdefa5d918 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Wed, 13 Nov 2024 17:20:54 +0100 Subject: [PATCH 91/95] Add Search Phase APM metrics (#113194) --- docs/changelog/113194.yaml | 5 + .../search/SearchTransportAPMMetrics.java | 51 ---- .../action/search/SearchTransportService.java | 124 +++------- .../action/search/TransportSearchAction.java | 3 +- .../org/elasticsearch/index/IndexModule.java | 7 +- .../stats/ShardSearchPhaseAPMMetrics.java | 64 +++++ .../elasticsearch/indices/IndicesService.java | 10 +- .../indices/IndicesServiceBuilder.java | 12 + .../elasticsearch/node/NodeConstruction.java | 9 +- .../elasticsearch/threadpool/ThreadPool.java | 7 + .../search/TransportSearchActionTests.java | 1 - .../elasticsearch/index/IndexModuleTests.java | 19 +- .../SearchTransportTelemetryTests.java | 142 ----------- .../ShardSearchPhaseAPMMetricsTests.java | 220 ++++++++++++++++++ .../snapshots/SnapshotResiliencyTests.java | 2 - .../xpack/security/SecurityTests.java | 3 +- .../xpack/watcher/WatcherPluginTests.java | 3 +- 17 files changed, 370 insertions(+), 312 deletions(-) create mode 100644 docs/changelog/113194.yaml delete mode 100644 server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java create mode 100644 server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchPhaseAPMMetrics.java delete mode 100644 server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java create mode 100644 
server/src/test/java/org/elasticsearch/search/TelemetryMetrics/ShardSearchPhaseAPMMetricsTests.java diff --git a/docs/changelog/113194.yaml b/docs/changelog/113194.yaml new file mode 100644 index 0000000000000..132659321c65e --- /dev/null +++ b/docs/changelog/113194.yaml @@ -0,0 +1,5 @@ +pr: 113194 +summary: Add Search Phase APM metrics +area: Search +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java deleted file mode 100644 index 6141e1704969b..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.action.search; - -import org.elasticsearch.telemetry.metric.LongHistogram; -import org.elasticsearch.telemetry.metric.MeterRegistry; - -public class SearchTransportAPMMetrics { - public static final String SEARCH_ACTION_LATENCY_BASE_METRIC = "es.search.nodes.transport_actions.latency.histogram"; - public static final String ACTION_ATTRIBUTE_NAME = "action"; - - public static final String QUERY_CAN_MATCH_NODE_METRIC = "shards_can_match"; - public static final String DFS_ACTION_METRIC = "dfs_query_then_fetch/shard_dfs_phase"; - public static final String QUERY_ID_ACTION_METRIC = "dfs_query_then_fetch/shard_query_phase"; - public static final String QUERY_ACTION_METRIC = "query_then_fetch/shard_query_phase"; - public static final String RANK_SHARD_FEATURE_ACTION_METRIC = "rank/shard_feature_phase"; - public static final String FREE_CONTEXT_ACTION_METRIC = "shard_release_context"; - public static final String FETCH_ID_ACTION_METRIC = "shard_fetch_phase"; - public static final String QUERY_SCROLL_ACTION_METRIC = "scroll/shard_query_phase"; - public static final String FETCH_ID_SCROLL_ACTION_METRIC = "scroll/shard_fetch_phase"; - public static final String QUERY_FETCH_SCROLL_ACTION_METRIC = "scroll/shard_query_and_fetch_phase"; - public static final String FREE_CONTEXT_SCROLL_ACTION_METRIC = "scroll/shard_release_context"; - public static final String CLEAR_SCROLL_CONTEXTS_ACTION_METRIC = "scroll/shard_release_contexts"; - - private final LongHistogram actionLatencies; - - public SearchTransportAPMMetrics(MeterRegistry meterRegistry) { - this( - meterRegistry.registerLongHistogram( - SEARCH_ACTION_LATENCY_BASE_METRIC, - "Transport action execution times at the node level, expressed as a histogram", - "millis" - ) - ); - } - - private SearchTransportAPMMetrics(LongHistogram actionLatencies) { - this.actionLatencies = actionLatencies; - } - - public LongHistogram getActionLatencies() { - return actionLatencies; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 604cf950f083b..8444a92b24432 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ 
b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -67,20 +67,6 @@ import java.util.concurrent.Executor; import java.util.function.BiFunction; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.CLEAR_SCROLL_CONTEXTS_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_CAN_MATCH_NODE_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_FETCH_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.RANK_SHARD_FEATURE_ACTION_METRIC; - /** * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through * transport. @@ -450,11 +436,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static void registerRequestHandler( - TransportService transportService, - SearchService searchService, - SearchTransportAPMMetrics searchTransportMetrics - ) { + public static void registerRequestHandler(TransportService transportService, SearchService searchService) { final TransportRequestHandler freeContextHandler = (request, channel, task) -> { logger.trace("releasing search context [{}]", request.id()); boolean freed = searchService.freeReaderContext(request.id()); @@ -465,7 +447,7 @@ public static void registerRequestHandler( FREE_CONTEXT_SCROLL_ACTION_NAME, freeContextExecutor, ScrollFreeContextRequest::new, - instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) + freeContextHandler ); TransportActionProxy.registerProxyAction( transportService, @@ -478,7 +460,7 @@ public static void registerRequestHandler( FREE_CONTEXT_ACTION_NAME, freeContextExecutor, SearchFreeContextRequest::new, - instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) + freeContextHandler ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, false, SearchFreeContextResponse::readFrom); @@ -486,10 +468,10 @@ public static void registerRequestHandler( CLEAR_SCROLL_CONTEXTS_ACTION_NAME, freeContextExecutor, ClearScrollContextsRequest::new, - instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { + (request, channel, task) -> { searchService.freeAllScrollContexts(); channel.sendResponse(TransportResponse.Empty.INSTANCE); - }) + } ); TransportActionProxy.registerProxyAction( transportService, @@ -502,16 +484,7 @@ public static void registerRequestHandler( DFS_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, 
ShardSearchRequest::new, - instrumentedHandler( - DFS_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeDfsPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) - ) + (request, channel, task) -> searchService.executeDfsPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)) ); TransportActionProxy.registerProxyAction(transportService, DFS_ACTION_NAME, true, DfsSearchResult::new); @@ -519,15 +492,10 @@ public static void registerRequestHandler( QUERY_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardSearchRequest::new, - instrumentedHandler( - QUERY_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyActionWithDynamicResponseType( @@ -541,15 +509,10 @@ public static void registerRequestHandler( QUERY_ID_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, QuerySearchRequest::new, - instrumentedHandler( - QUERY_ID_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, true, QuerySearchResult::new); @@ -558,15 +521,10 @@ public static void registerRequestHandler( QUERY_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - instrumentedHandler( - QUERY_SCROLL_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, true, ScrollQuerySearchResult::new); @@ -575,15 +533,10 @@ public static void registerRequestHandler( QUERY_FETCH_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - instrumentedHandler( - QUERY_FETCH_SCROLL_ACTION_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.executeFetchPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ) + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, true, ScrollQueryFetchSearchResult::new); @@ -594,7 +547,7 @@ public static void registerRequestHandler( RANK_FEATURE_SHARD_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, RankFeatureShardRequest::new, - instrumentedHandler(RANK_SHARD_FEATURE_ACTION_METRIC, transportService, searchTransportMetrics, rankShardFeatureRequest) + rankShardFeatureRequest ); TransportActionProxy.registerProxyAction(transportService, RANK_FEATURE_SHARD_ACTION_NAME, true, RankFeatureResult::new); @@ -604,7 +557,7 @@ 
public static void registerRequestHandler( FETCH_ID_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardFetchRequest::new, - instrumentedHandler(FETCH_ID_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, shardFetchRequestHandler) + shardFetchRequestHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, true, FetchSearchResult::new); @@ -614,7 +567,7 @@ public static void registerRequestHandler( true, true, ShardFetchSearchRequest::new, - instrumentedHandler(FETCH_ID_ACTION_METRIC, transportService, searchTransportMetrics, shardFetchRequestHandler) + shardFetchRequestHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); @@ -622,12 +575,7 @@ public static void registerRequestHandler( QUERY_CAN_MATCH_NODE_NAME, transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION), CanMatchNodeRequest::new, - instrumentedHandler( - QUERY_CAN_MATCH_NODE_METRIC, - transportService, - searchTransportMetrics, - (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) - ) + (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } @@ -658,26 +606,6 @@ public void onFailure(Exception e) { }); } - private static TransportRequestHandler instrumentedHandler( - String actionQualifier, - TransportService transportService, - SearchTransportAPMMetrics searchTransportMetrics, - TransportRequestHandler transportRequestHandler - ) { - var threadPool = transportService.getThreadPool(); - var latencies = searchTransportMetrics.getActionLatencies(); - Map attributes = Map.of(ACTION_ATTRIBUTE_NAME, actionQualifier); - return (request, channel, task) -> { - var startTime = threadPool.relativeTimeInMillis(); - try { - transportRequestHandler.messageReceived(request, channel, task); - } finally { - var elapsedTime = threadPool.relativeTimeInMillis() - startTime; - latencies.record(elapsedTime, attributes); - } - }; - } - /** * Returns a connection to the given node on the provided cluster. If the cluster alias is null the node will be resolved * against the local cluster. 
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 35f106ab58cbc..9aab5d005b1bb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -175,7 +175,6 @@ public TransportSearchAction( IndexNameExpressionResolver indexNameExpressionResolver, NamedWriteableRegistry namedWriteableRegistry, ExecutorSelector executorSelector, - SearchTransportAPMMetrics searchTransportMetrics, SearchResponseMetrics searchResponseMetrics, Client client, UsageService usageService @@ -186,7 +185,7 @@ public TransportSearchAction( this.searchPhaseController = searchPhaseController; this.searchTransportService = searchTransportService; this.remoteClusterService = searchTransportService.getRemoteClusterService(); - SearchTransportService.registerRequestHandler(transportService, searchService, searchTransportMetrics); + SearchTransportService.registerRequestHandler(transportService, searchService); this.clusterService = clusterService; this.transportService = transportService; this.searchService = searchService; diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 4ff7ef60cc0a2..64182b000827d 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -167,7 +167,7 @@ public interface DirectoryWrapper { private final Map> similarities = new HashMap<>(); private final Map directoryFactories; private final SetOnce> forceQueryCacheProvider = new SetOnce<>(); - private final List searchOperationListeners = new ArrayList<>(); + private final List searchOperationListeners; private final List indexOperationListeners = new ArrayList<>(); private final IndexNameExpressionResolver expressionResolver; private final AtomicBoolean frozen = new AtomicBoolean(false); @@ -194,11 +194,14 @@ public IndexModule( final IndexNameExpressionResolver expressionResolver, final Map recoveryStateFactories, final SlowLogFieldProvider slowLogFieldProvider, - final MapperMetrics mapperMetrics + final MapperMetrics mapperMetrics, + final List searchOperationListeners ) { this.indexSettings = indexSettings; this.analysisRegistry = analysisRegistry; this.engineFactory = Objects.requireNonNull(engineFactory); + // Need to have a mutable arraylist for plugins to add listeners to it + this.searchOperationListeners = new ArrayList<>(searchOperationListeners); this.searchOperationListeners.add(new SearchSlowLog(indexSettings, slowLogFieldProvider)); this.indexOperationListeners.add(new IndexingSlowLog(indexSettings, slowLogFieldProvider)); this.directoryFactories = Collections.unmodifiableMap(directoryFactories); diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchPhaseAPMMetrics.java b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchPhaseAPMMetrics.java new file mode 100644 index 0000000000000..6b523a154379e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchPhaseAPMMetrics.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index.search.stats; + +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +public final class ShardSearchPhaseAPMMetrics implements SearchOperationListener { + + public static final String QUERY_SEARCH_PHASE_METRIC = "es.search.shards.phases.query.duration.histogram"; + public static final String FETCH_SEARCH_PHASE_METRIC = "es.search.shards.phases.fetch.duration.histogram"; + + public static final String SYSTEM_THREAD_ATTRIBUTE_NAME = "system_thread"; + + private final LongHistogram queryPhaseMetric; + private final LongHistogram fetchPhaseMetric; + + // Avoid allocating objects in the search path and multithreading clashes + private static final ThreadLocal<Map<String, Object>> THREAD_LOCAL_ATTRS = ThreadLocal.withInitial(() -> new HashMap<>(1)); + + public ShardSearchPhaseAPMMetrics(MeterRegistry meterRegistry) { + this.queryPhaseMetric = meterRegistry.registerLongHistogram( + QUERY_SEARCH_PHASE_METRIC, + "Query search phase execution times at the shard level, expressed as a histogram", + "ms" + ); + this.fetchPhaseMetric = meterRegistry.registerLongHistogram( + FETCH_SEARCH_PHASE_METRIC, + "Fetch search phase execution times at the shard level, expressed as a histogram", + "ms" + ); + } + + @Override + public void onQueryPhase(SearchContext searchContext, long tookInNanos) { + recordPhaseLatency(queryPhaseMetric, tookInNanos); + } + + @Override + public void onFetchPhase(SearchContext searchContext, long tookInNanos) { + recordPhaseLatency(fetchPhaseMetric, tookInNanos); + } + + private static void recordPhaseLatency(LongHistogram histogramMetric, long tookInNanos) { + Map<String, Object> attrs = ShardSearchPhaseAPMMetrics.THREAD_LOCAL_ATTRS.get(); + boolean isSystem = ((EsExecutors.EsThread) Thread.currentThread()).isSystem(); + attrs.put(SYSTEM_THREAD_ATTRIBUTE_NAME, isSystem); + histogramMetric.record(TimeUnit.NANOSECONDS.toMillis(tookInNanos), attrs); + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 706f788e8a310..3ac61bbca1a21 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -124,6 +124,7 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.IndexingStats; +import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; @@ -263,6 +264,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator; private final MapperMetrics mapperMetrics; private final
PostRecoveryMerger postRecoveryMerger; + private final List searchOperationListeners; @Override protected void doStart() { @@ -379,8 +381,8 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.timestampFieldMapperService = new TimestampFieldMapperService(settings, threadPool, this); - this.postRecoveryMerger = new PostRecoveryMerger(settings, threadPool.executor(ThreadPool.Names.FORCE_MERGE), this::getShardOrNull); + this.searchOperationListeners = builder.searchOperationListener; } private static final String DANGLING_INDICES_UPDATE_THREAD_NAME = "DanglingIndices#updateTask"; @@ -752,7 +754,8 @@ private synchronized IndexService createIndexService( indexNameExpressionResolver, recoveryStateFactories, loadSlowLogFieldProvider(), - mapperMetrics + mapperMetrics, + searchOperationListeners ); for (IndexingOperationListener operationListener : indexingOperationListeners) { indexModule.addIndexOperationListener(operationListener); @@ -830,7 +833,8 @@ public synchronized MapperService createIndexMapperServiceForValidation(IndexMet indexNameExpressionResolver, recoveryStateFactories, loadSlowLogFieldProvider(), - mapperMetrics + mapperMetrics, + searchOperationListeners ); pluginsService.forEach(p -> p.onIndexModule(indexModule)); return indexModule.newIndexMapperService(clusterService, parserConfig, mapperRegistry, scriptService); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java index 8fff1f5bef51f..08d1b5ce3a96c 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperRegistry; +import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.IndexStorePlugin; @@ -74,6 +75,7 @@ public class IndicesServiceBuilder { @Nullable CheckedBiConsumer requestCacheKeyDifferentiator; MapperMetrics mapperMetrics; + List searchOperationListener = List.of(); public IndicesServiceBuilder settings(Settings settings) { this.settings = settings; @@ -177,6 +179,15 @@ public IndicesServiceBuilder mapperMetrics(MapperMetrics mapperMetrics) { return this; } + public List searchOperationListeners() { + return searchOperationListener; + } + + public IndicesServiceBuilder searchOperationListeners(List searchOperationListener) { + this.searchOperationListener = searchOperationListener; + return this; + } + public IndicesService build() { Objects.requireNonNull(settings); Objects.requireNonNull(pluginsService); @@ -201,6 +212,7 @@ public IndicesService build() { Objects.requireNonNull(indexFoldersDeletionListeners); Objects.requireNonNull(snapshotCommitSuppliers); Objects.requireNonNull(mapperMetrics); + Objects.requireNonNull(searchOperationListener); // collect engine factory providers from plugins engineFactoryProviders = pluginsService.filterPlugins(EnginePlugin.class) diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 
c883fca8d047f..e8b9d18a1dd08 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; import org.elasticsearch.action.search.SearchPhaseController; -import org.elasticsearch.action.search.SearchTransportAPMMetrics; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.update.UpdateHelper; @@ -117,6 +116,8 @@ import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.SourceFieldMetrics; +import org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics; +import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; @@ -798,6 +799,9 @@ private void construct( threadPool::relativeTimeInMillis ); MapperMetrics mapperMetrics = new MapperMetrics(sourceFieldMetrics); + final List searchOperationListeners = List.of( + new ShardSearchPhaseAPMMetrics(telemetryProvider.getMeterRegistry()) + ); IndicesService indicesService = new IndicesServiceBuilder().settings(settings) .pluginsService(pluginsService) @@ -819,6 +823,7 @@ private void construct( .valuesSourceRegistry(searchModule.getValuesSourceRegistry()) .requestCacheKeyDifferentiator(searchModule.getRequestCacheKeyDifferentiator()) .mapperMetrics(mapperMetrics) + .searchOperationListeners(searchOperationListeners) .build(); final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); @@ -1002,7 +1007,6 @@ private void construct( telemetryProvider.getTracer() ); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); - final SearchTransportAPMMetrics searchTransportAPMMetrics = new SearchTransportAPMMetrics(telemetryProvider.getMeterRegistry()); final SearchResponseMetrics searchResponseMetrics = new SearchResponseMetrics(telemetryProvider.getMeterRegistry()); final SearchTransportService searchTransportService = new SearchTransportService( transportService, @@ -1182,7 +1186,6 @@ private void construct( b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(MetadataUpdateSettingsService.class).toInstance(metadataUpdateSettingsService); b.bind(SearchService.class).toInstance(searchService); - b.bind(SearchTransportAPMMetrics.class).toInstance(searchTransportAPMMetrics); b.bind(SearchResponseMetrics.class).toInstance(searchResponseMetrics); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(searchService::aggReduceContextBuilder)); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index f55e3740aaa8f..cc5e96327b241 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -1062,6 +1062,13 @@ public static boolean assertCurrentThreadPool(String... 
permittedThreadPoolNames return true; } + public static boolean assertTestThreadPool() { + final var threadName = Thread.currentThread().getName(); + final var executorName = EsExecutors.executorName(threadName); + assert threadName.startsWith("TEST-") || threadName.startsWith("LuceneTestCase") : threadName + " is not a test thread"; + return true; + } + public static boolean assertInSystemContext(ThreadPool threadPool) { final var threadName = Thread.currentThread().getName(); assert threadName.startsWith("TEST-") || threadName.startsWith("LuceneTestCase") || threadPool.getThreadContext().isSystemContext() diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 70682cfd41d82..a9de118c6b859 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1765,7 +1765,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { new IndexNameExpressionResolver(threadPool.getThreadContext(), EmptySystemIndices.INSTANCE), null, null, - new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry()), new SearchResponseMetrics(TelemetryProvider.NOOP.getMeterRegistry()), client, new UsageService() diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 909005d228665..49a4d519c0ea4 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -111,6 +111,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; @@ -237,7 +238,8 @@ public void testWrapperIsBound() throws IOException { indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); module.setReaderWrapper(s -> new Wrapper()); @@ -264,7 +266,8 @@ public void testRegisterIndexStore() throws IOException { indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); final IndexService indexService = newIndexService(module); @@ -289,7 +292,8 @@ public void testDirectoryWrapper() throws IOException { indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); module.setDirectoryWrapper(new TestDirectoryWrapper()); @@ -642,7 +646,8 @@ public void testRegisterCustomRecoveryStateFactory() throws IOException { indexNameExpressionResolver, recoveryStateFactories, mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); final IndexService indexService = newIndexService(module); @@ -664,7 +669,8 @@ public void testIndexCommitListenerIsBound() throws IOException, ExecutionExcept indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); final AtomicLong lastAcquiredPrimaryTerm = new AtomicLong(); @@ -766,7 +772,8 @@ private static 
IndexModule createIndexModule( indexNameExpressionResolver, Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + emptyList() ); } diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java deleted file mode 100644 index 15f5ed0d800d2..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.search.TelemetryMetrics; - -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.telemetry.Measurement; -import org.elasticsearch.telemetry.TestTelemetryPlugin; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.junit.After; -import org.junit.Before; - -import java.util.Collection; -import java.util.List; - -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; -import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; -import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; - -public class SearchTransportTelemetryTests extends ESSingleNodeTestCase { - - private static final String indexName = "test_search_metrics2"; - private final int num_primaries = randomIntBetween(2, 7); - - @Override - protected boolean resetNodeAfterTest() { - return true; - } - - @Before - private void setUpIndex() throws Exception { - createIndex( - indexName, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, num_primaries) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build() - ); - ensureGreen(indexName); - - prepareIndex(indexName).setId("1").setSource("body", "doc1").setRefreshPolicy(IMMEDIATE).get(); - prepareIndex(indexName).setId("2").setSource("body", "doc2").setRefreshPolicy(IMMEDIATE).get(); - } - - @After - private void afterTest() { - 
resetMeter(); - } - - @Override - protected Collection> getPlugins() { - return pluginList(TestTelemetryPlugin.class); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") - public void testSearchTransportMetricsDfsQueryThenFetch() throws InterruptedException { - assertSearchHitsWithoutFailures( - client().prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), - "1" - ); - assertEquals(num_primaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); - assertEquals(num_primaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); - resetMeter(); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") - public void testSearchTransportMetricsQueryThenFetch() throws InterruptedException { - assertSearchHitsWithoutFailures( - client().prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), - "1" - ); - assertEquals(num_primaries, getNumberOfMeasurements(QUERY_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); - resetMeter(); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") - public void testSearchTransportMetricsScroll() throws InterruptedException { - assertScrollResponsesAndHitCount( - client(), - TimeValue.timeValueSeconds(60), - client().prepareSearch(indexName) - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setSize(1) - .setQuery(simpleQueryStringQuery("doc1 doc2")), - 2, - (respNum, response) -> { - if (respNum == 1) { - assertEquals(num_primaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); - assertEquals(num_primaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); - } else if (respNum == 2) { - assertEquals(num_primaries, getNumberOfMeasurements(QUERY_SCROLL_ACTION_METRIC)); - assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_SCROLL_ACTION_METRIC)); - } - resetMeter(); - } - ); - - assertEquals(num_primaries, getNumberOfMeasurements(FREE_CONTEXT_SCROLL_ACTION_METRIC)); - resetMeter(); - } - - private void resetMeter() { - getTestTelemetryPlugin().resetMeter(); - } - - private TestTelemetryPlugin getTestTelemetryPlugin() { - return getInstanceFromNode(PluginsService.class).filterPlugins(TestTelemetryPlugin.class).toList().get(0); - } - - private long getNumberOfMeasurements(String attributeValue) { - final List measurements = getTestTelemetryPlugin().getLongHistogramMeasurement( - org.elasticsearch.action.search.SearchTransportAPMMetrics.SEARCH_ACTION_LATENCY_BASE_METRIC - ); - return measurements.stream() - .filter( - m -> m.attributes().get(org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME) == attributeValue - ) - .count(); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/ShardSearchPhaseAPMMetricsTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/ShardSearchPhaseAPMMetricsTests.java new file mode 100644 index 0000000000000..80bb7ebc8ddb8 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/ShardSearchPhaseAPMMetricsTests.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.search.TelemetryMetrics; + +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.ExecutorNames; +import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.Collection; +import java.util.List; +import java.util.stream.Stream; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; +import static org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics.FETCH_SEARCH_PHASE_METRIC; +import static org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics.QUERY_SEARCH_PHASE_METRIC; +import static org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics.SYSTEM_THREAD_ATTRIBUTE_NAME; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; + +public class ShardSearchPhaseAPMMetricsTests extends ESSingleNodeTestCase { + + private static final String indexName = "test_search_metrics2"; + private final int num_primaries = randomIntBetween(2, 7); + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Before + private void setUpIndex() throws Exception { + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, num_primaries) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(indexName); + + prepareIndex(indexName).setId("1").setSource("body", "doc1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(indexName).setId("2").setSource("body", "doc2").setRefreshPolicy(IMMEDIATE).get(); + + prepareIndex(TestSystemIndexPlugin.INDEX_NAME).setId("1").setSource("body", "doc1").setRefreshPolicy(IMMEDIATE).get(); + prepareIndex(TestSystemIndexPlugin.INDEX_NAME).setId("2").setSource("body", "doc2").setRefreshPolicy(IMMEDIATE).get(); + } + + @After + private void afterTest() { + resetMeter(); + } + + @Override + protected Collection> getPlugins() { + return pluginList(TestTelemetryPlugin.class, TestSystemIndexPlugin.class); + } + + public void testMetricsDfsQueryThenFetch() throws InterruptedException { + checkMetricsDfsQueryThenFetch(indexName, false); + } + + public void testMetricsDfsQueryThenFetchSystem() throws InterruptedException { + checkMetricsDfsQueryThenFetch(TestSystemIndexPlugin.INDEX_NAME, true); + } + + private void checkMetricsDfsQueryThenFetch(String indexName, boolean isSystemIndex) throws InterruptedException { + assertSearchHitsWithoutFailures( + 
client().prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), + "1" + ); + checkNumberOfMeasurementsForPhase(QUERY_SEARCH_PHASE_METRIC, isSystemIndex); + assertNotEquals(0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + checkMetricsAttributes(isSystemIndex); + } + + public void testSearchTransportMetricsQueryThenFetch() throws InterruptedException { + checkSearchTransportMetricsQueryThenFetch(indexName, false); + } + + public void testSearchTransportMetricsQueryThenFetchSystem() throws InterruptedException { + checkSearchTransportMetricsQueryThenFetch(TestSystemIndexPlugin.INDEX_NAME, true); + } + + private void checkSearchTransportMetricsQueryThenFetch(String indexName, boolean isSystemIndex) throws InterruptedException { + assertSearchHitsWithoutFailures( + client().prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("doc1")), + "1" + ); + checkNumberOfMeasurementsForPhase(QUERY_SEARCH_PHASE_METRIC, isSystemIndex); + assertNotEquals(0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + checkMetricsAttributes(isSystemIndex); + } + + public void testSearchTransportMetricsScroll() throws InterruptedException { + checkSearchTransportMetricsScroll(indexName, false); + } + + public void testSearchTransportMetricsScrollSystem() throws InterruptedException { + checkSearchTransportMetricsScroll(TestSystemIndexPlugin.INDEX_NAME, true); + } + + private void checkSearchTransportMetricsScroll(String indexName, boolean isSystemIndex) throws InterruptedException { + assertScrollResponsesAndHitCount( + client(), + TimeValue.timeValueSeconds(60), + client().prepareSearch(indexName) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setSize(1) + .setQuery(simpleQueryStringQuery("doc1 doc2")), + 2, + (respNum, response) -> { + // No hits, no fetching done + assertEquals(isSystemIndex ? 1 : num_primaries, getNumberOfMeasurementsForPhase(QUERY_SEARCH_PHASE_METRIC)); + if (response.getHits().getHits().length > 0) { + assertNotEquals(0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + } else { + assertEquals(isSystemIndex ? 1 : 0, getNumberOfMeasurementsForPhase(FETCH_SEARCH_PHASE_METRIC)); + } + checkMetricsAttributes(isSystemIndex); + resetMeter(); + } + ); + + } + + private void resetMeter() { + getTestTelemetryPlugin().resetMeter(); + } + + private TestTelemetryPlugin getTestTelemetryPlugin() { + return getInstanceFromNode(PluginsService.class).filterPlugins(TestTelemetryPlugin.class).toList().get(0); + } + + private void checkNumberOfMeasurementsForPhase(String phase, boolean isSystemIndex) { + int numMeasurements = getNumberOfMeasurementsForPhase(phase); + assertEquals(isSystemIndex ? 
1 : num_primaries, numMeasurements); + } + + private int getNumberOfMeasurementsForPhase(String phase) { + final List<Measurement> measurements = getTestTelemetryPlugin().getLongHistogramMeasurement(phase); + return measurements.size(); + } + + private void checkMetricsAttributes(boolean isSystem) { + final List<Measurement> queryMeasurements = getTestTelemetryPlugin().getLongHistogramMeasurement(QUERY_SEARCH_PHASE_METRIC); + final List<Measurement> fetchMeasurements = getTestTelemetryPlugin().getLongHistogramMeasurement(FETCH_SEARCH_PHASE_METRIC); + assertTrue( + Stream.concat(queryMeasurements.stream(), fetchMeasurements.stream()).allMatch(m -> checkMeasurementAttributes(m, isSystem)) + ); + } + + private boolean checkMeasurementAttributes(Measurement m, boolean isSystem) { + return ((boolean) m.attributes().get(SYSTEM_THREAD_ATTRIBUTE_NAME)) == isSystem; + } + + public static class TestSystemIndexPlugin extends Plugin implements SystemIndexPlugin { + + static final String INDEX_NAME = ".test-system-index"; + + public TestSystemIndexPlugin() {} + + @Override + public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) { + return List.of( + SystemIndexDescriptor.builder() + .setIndexPattern(INDEX_NAME + "*") + .setPrimaryIndex(INDEX_NAME) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMappings(""" + { + "_meta": { + "version": "8.0.0", + "managed_index_mappings_version": 3 + }, + "properties": { + "body": { "type": "keyword" } + } + } + """) + .setThreadPools(ExecutorNames.DEFAULT_SYSTEM_INDEX_THREAD_POOLS) + .setOrigin(ShardSearchPhaseAPMMetricsTests.class.getSimpleName()) + .build() + ); + } + + @Override + public String getFeatureName() { + return ShardSearchPhaseAPMMetricsTests.class.getSimpleName(); + } + + @Override + public String getFeatureDescription() { + return "test plugin"; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index e0363d84ea4d2..077877f713571 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -55,7 +55,6 @@ import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchTransportAPMMetrics; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilters; @@ -2492,7 +2491,6 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { indexNameExpressionResolver, namedWriteableRegistry, EmptySystemIndices.INSTANCE.getExecutorSelector(), - new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry()), new SearchResponseMetrics(TelemetryProvider.NOOP.getMeterRegistry()), client, usageService diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index c0e55992df88f..5c6c3e8c7933c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -377,7 +377,8 @@ public void
testOnIndexModuleIsNoOpWithSecurityDisabled() throws Exception { TestIndexNameExpressionResolver.newInstance(threadPool.getThreadContext()), Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + List.of() ); security.onIndexModule(indexModule); // indexReaderWrapper is a SetOnce so if Security#onIndexModule had already set a ReaderWrapper we would get an exception here diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index 70896a67a9468..e8d6a2868a496 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -70,7 +70,8 @@ public void testWatcherDisabledTests() throws Exception { TestIndexNameExpressionResolver.newInstance(), Collections.emptyMap(), mock(SlowLogFieldProvider.class), - MapperMetrics.NOOP + MapperMetrics.NOOP, + List.of() ); // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it watcher.onIndexModule(indexModule); From f18c7c8bee7d255c80fc8bd08ab09f48f04cb68b Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Wed, 13 Nov 2024 17:22:14 +0100 Subject: [PATCH 92/95] Use Long instead of Double for allocation disk usage APM metrics (#116732) I was trying to build a dashboard on top of these metrics and came across some zeros and negative values that I found a bit surprising. Also, some long values were mistakenly exposed as Double metrics. I've updated the metric test so that it makes more concrete assertions. (Note that the desired balance disk usage metric is genuinely a double, so I'm keeping it as is.)
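For context, the corrected registration shape looks roughly like this (a minimal sketch distilled from the DesiredBalanceMetrics diff below; `statsPerNode` and `attributesFor` are illustrative stand-ins, not names from the patch):

    // Disk usage is a byte count, so expose it as a long gauge rather than a double gauge.
    meterRegistry.registerLongsGauge(
        CURRENT_NODE_DISK_USAGE_METRIC_NAME,
        "The current disk usage of nodes",
        "bytes",
        () -> statsPerNode.entrySet()
            .stream()
            .map(e -> new LongWithAttributes(e.getValue().currentDiskUsage(), attributesFor(e.getKey())))
            .toList()
    );

The per-node attributes ride along on each LongWithAttributes value, so a single gauge covers every node without registering one metric per node.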
--- .../DesiredBalanceReconcilerMetricsIT.java | 29 ++++++++++++++----- .../allocator/DesiredBalanceMetrics.java | 20 ++++++------- .../cluster/ClusterInfoServiceUtils.java | 5 ++++ 3 files changed, 36 insertions(+), 18 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java index 36374f7a3a8eb..9a71bf86388a4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerMetricsIT.java @@ -9,8 +9,12 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.elasticsearch.cluster.ClusterInfoService; +import org.elasticsearch.cluster.ClusterInfoServiceUtils; +import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.telemetry.TestTelemetryPlugin; @@ -56,8 +60,15 @@ public void testDesiredBalanceGaugeMetricsAreOnlyPublishedByCurrentMaster() thro public void testDesiredBalanceMetrics() { internalCluster().startNodes(2); prepareCreate("test").setSettings(indexSettings(2, 1)).get(); - indexRandom(randomBoolean(), "test", between(50, 100)); ensureGreen(); + + indexRandom(randomBoolean(), "test", between(50, 100)); + flush("test"); + // Make sure new cluster info is available + final var infoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(ClusterInfoService.class); + ClusterInfoServiceUtils.setUpdateFrequency(infoService, TimeValue.timeValueMillis(200)); + assertNotNull("info should not be null", ClusterInfoServiceUtils.refresh(infoService)); + final var telemetryPlugin = getTelemetryPlugin(internalCluster().getMasterName()); telemetryPlugin.collect(); assertThat(telemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.UNASSIGNED_SHARDS_METRIC_NAME), not(empty())); @@ -73,7 +84,7 @@ public void testDesiredBalanceMetrics() { ); assertThat(desiredBalanceNodeWeightsMetrics.size(), equalTo(2)); for (var nodeStat : desiredBalanceNodeWeightsMetrics) { - assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertTrue(nodeStat.isDouble()); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } @@ -122,15 +133,16 @@ public void testDesiredBalanceMetrics() { assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var currentNodeDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var currentNodeDiskUsageMetrics = telemetryPlugin.getLongGaugeMeasurement( DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME ); assertThat(currentNodeDiskUsageMetrics.size(), equalTo(2)); for (var nodeStat : currentNodeDiskUsageMetrics) { - assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat(nodeStat.value().longValue(), greaterThanOrEqualTo(0L)); assertThat((String) 
nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } + assertTrue(currentNodeDiskUsageMetrics.stream().anyMatch(m -> m.getLong() > 0L)); final var currentNodeUndesiredShardCountMetrics = telemetryPlugin.getLongGaugeMeasurement( DesiredBalanceMetrics.CURRENT_NODE_UNDESIRED_SHARD_COUNT_METRIC_NAME ); @@ -140,15 +152,16 @@ public void testDesiredBalanceMetrics() { assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } - final var currentNodeForecastedDiskUsageMetrics = telemetryPlugin.getDoubleGaugeMeasurement( + final var currentNodeForecastedDiskUsageMetrics = telemetryPlugin.getLongGaugeMeasurement( DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME ); assertThat(currentNodeForecastedDiskUsageMetrics.size(), equalTo(2)); for (var nodeStat : currentNodeForecastedDiskUsageMetrics) { - assertThat(nodeStat.value().doubleValue(), greaterThanOrEqualTo(0.0)); + assertThat(nodeStat.value().longValue(), greaterThanOrEqualTo(0L)); assertThat((String) nodeStat.attributes().get("node_id"), is(in(nodeIds))); assertThat((String) nodeStat.attributes().get("node_name"), is(in(nodeNames))); } + assertTrue(currentNodeForecastedDiskUsageMetrics.stream().anyMatch(m -> m.getLong() > 0L)); } private static void assertOnlyMasterIsPublishingMetrics() { @@ -182,10 +195,10 @@ private static void assertMetricsAreBeingPublished(String nodeName, boolean shou matcher ); assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_WRITE_LOAD_METRIC_NAME), matcher); - assertThat(testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME), matcher); + assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_DISK_USAGE_METRIC_NAME), matcher); assertThat(testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_SHARD_COUNT_METRIC_NAME), matcher); assertThat( - testTelemetryPlugin.getDoubleGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME), + testTelemetryPlugin.getLongGaugeMeasurement(DesiredBalanceMetrics.CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME), matcher ); assertThat( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java index 3ed5bc269e6c4..cf8840dc95724 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceMetrics.java @@ -136,7 +136,7 @@ public DesiredBalanceMetrics(MeterRegistry meterRegistry) { "threads", this::getCurrentNodeWriteLoadMetrics ); - meterRegistry.registerDoublesGauge( + meterRegistry.registerLongsGauge( CURRENT_NODE_DISK_USAGE_METRIC_NAME, "The current disk usage of nodes", "bytes", @@ -148,7 +148,7 @@ public DesiredBalanceMetrics(MeterRegistry meterRegistry) { "unit", this::getCurrentNodeShardCountMetrics ); - meterRegistry.registerDoublesGauge( + meterRegistry.registerLongsGauge( CURRENT_NODE_FORECASTED_DISK_USAGE_METRIC_NAME, "The current forecasted disk usage of nodes", "bytes", @@ -231,16 +231,16 @@ private List getDesiredBalanceNodeShardCountMetrics() { return values; } - private List 
<DoubleWithAttributes> getCurrentNodeDiskUsageMetrics() { + private List<LongWithAttributes> getCurrentNodeDiskUsageMetrics() { if (nodeIsMaster == false) { return List.of(); } var stats = allocationStatsPerNodeRef.get(); - List<DoubleWithAttributes> doubles = new ArrayList<>(stats.size()); + List<LongWithAttributes> values = new ArrayList<>(stats.size()); for (var node : stats.keySet()) { - doubles.add(new DoubleWithAttributes(stats.get(node).currentDiskUsage(), getNodeAttributes(node))); + values.add(new LongWithAttributes(stats.get(node).currentDiskUsage(), getNodeAttributes(node))); } - return doubles; + return values; } private List<DoubleWithAttributes> getCurrentNodeWriteLoadMetrics() { @@ -267,16 +267,16 @@ private List<LongWithAttributes> getCurrentNodeShardCountMetrics() { return values; } - private List<DoubleWithAttributes> getCurrentNodeForecastedDiskUsageMetrics() { + private List<LongWithAttributes> getCurrentNodeForecastedDiskUsageMetrics() { if (nodeIsMaster == false) { return List.of(); } var stats = allocationStatsPerNodeRef.get(); - List<DoubleWithAttributes> doubles = new ArrayList<>(stats.size()); + List<LongWithAttributes> values = new ArrayList<>(stats.size()); for (var node : stats.keySet()) { - doubles.add(new DoubleWithAttributes(stats.get(node).forecastedDiskUsage(), getNodeAttributes(node))); + values.add(new LongWithAttributes(stats.get(node).forecastedDiskUsage(), getNodeAttributes(node))); } - return doubles; + return values; } private List<LongWithAttributes> getCurrentNodeUndesiredShardCountMetrics() { diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ClusterInfoServiceUtils.java index b4b35c0487d6e..bd93700fd4137 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ClusterInfoServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ClusterInfoServiceUtils.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.service.ClusterApplierService; +import org.elasticsearch.core.TimeValue; import java.util.concurrent.TimeUnit; @@ -37,4 +38,8 @@ protected boolean blockingAllowed() { throw new AssertionError(e); } } + + public static void setUpdateFrequency(InternalClusterInfoService internalClusterInfoService, TimeValue updateFrequency) { + internalClusterInfoService.setUpdateFrequency(updateFrequency); + } } From 1165a5f88ded71a405b05be8f72a2253a50822c5 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 14 Nov 2024 03:28:16 +1100 Subject: [PATCH 93/95] Mute org.elasticsearch.action.search.SearchRequestTests testSerializationConstants #116752 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index b860f4b6c4b5f..7cfbe3513e52e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -241,6 +241,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/116730 - class: org.elasticsearch.xpack.inference.InferenceRestIT issue: https://github.com/elastic/elasticsearch/issues/116740 +- class: org.elasticsearch.action.search.SearchRequestTests + method: testSerializationConstants + issue: https://github.com/elastic/elasticsearch/issues/116752 # Examples: # From c05a87a3e732f7290fe41f95629f8574473d52b9 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 14 Nov 2024 03:28:27 +1100 Subject: [PATCH 94/95] Mute org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryGroupsResolverTests org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryGroupsResolverTests
#116182 --- muted-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 7cfbe3513e52e..22c304dc13b62 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -244,6 +244,8 @@ tests: - class: org.elasticsearch.action.search.SearchRequestTests method: testSerializationConstants issue: https://github.com/elastic/elasticsearch/issues/116752 +- class: org.elasticsearch.xpack.security.authc.ldap.ActiveDirectoryGroupsResolverTests + issue: https://github.com/elastic/elasticsearch/issues/116182 # Examples: # From 15930cdbdfaa3272cafce2a5968920e0e39e5a08 Mon Sep 17 00:00:00 2001 From: Dan Rubinstein Date: Wed, 13 Nov 2024 12:47:30 -0500 Subject: [PATCH 95/95] Removing testGet from muted tests as it no longer exists (#116735) --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 22c304dc13b62..3aeadd9d141b5 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -97,9 +97,6 @@ tests: - class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformSpecificVariant issue: https://github.com/elastic/elasticsearch/issues/113950 -- class: org.elasticsearch.xpack.inference.InferenceCrudIT - method: testGet - issue: https://github.com/elastic/elasticsearch/issues/114135 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/rest-api/usage/line_38} issue: https://github.com/elastic/elasticsearch/issues/113694