diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index fda4315926b6b..207a332ed6717 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -88,6 +88,7 @@ steps: - openjdk17 - openjdk21 - openjdk22 + - openjdk23 GRADLE_TASK: - checkPart1 - checkPart2 @@ -113,6 +114,7 @@ steps: - openjdk17 - openjdk21 - openjdk22 + - openjdk23 BWC_VERSION: $BWC_LIST agents: provider: gcp diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 7c5f683cf9692..7ba46f0f0951c 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -735,6 +735,7 @@ steps: - openjdk17 - openjdk21 - openjdk22 + - openjdk23 GRADLE_TASK: - checkPart1 - checkPart2 @@ -760,6 +761,7 @@ steps: - openjdk17 - openjdk21 - openjdk22 + - openjdk23 BWC_VERSION: ["7.17.22", "8.13.5", "8.14.0", "8.15.0"] agents: provider: gcp diff --git a/distribution/packages/src/deb/lintian/elasticsearch b/distribution/packages/src/deb/lintian/elasticsearch index a6a46bb41f112..edd705b66caaa 100644 --- a/distribution/packages/src/deb/lintian/elasticsearch +++ b/distribution/packages/src/deb/lintian/elasticsearch @@ -59,3 +59,7 @@ unknown-field License # don't build them ourselves and the license precludes us modifying them # to fix this. library-not-linked-against-libc usr/share/elasticsearch/modules/x-pack-ml/platform/linux-x86_64/lib/libmkl_*.so + +# shared-lib-without-dependency-information (now shared-library-lacks-prerequisites) is falsely reported for libvec.so +# which has no dependencies (not even libc) besides the symbols in the base executable. +shared-lib-without-dependency-information usr/share/elasticsearch/lib/platform/linux-x64/libvec.so diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index 04079284b3ec9..8cfe9a1f03914 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -77,6 +77,7 @@ static List systemJvmOptions(Settings nodeSettings, final Map sysprops) { // working dir is ES installation, so we use relative path here Path platformDir = Paths.get("lib", "platform"); diff --git a/docs/build.gradle b/docs/build.gradle index 0eba980e8cc31..7ca4820eea1af 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1752,6 +1752,7 @@ setups['setup-snapshots'] = setups['setup-repository'] + ''' name: "my_admin_role" body: > { + "description": "Grants full access to all management features within the cluster.", "cluster": ["all"], "indices": [ {"names": ["index1", "index2" ], "privileges": ["all"], "field_security" : {"grant" : [ "title", "body" ]}} diff --git a/docs/changelog/106486.yaml b/docs/changelog/106486.yaml new file mode 100644 index 0000000000000..b33df50780e02 --- /dev/null +++ b/docs/changelog/106486.yaml @@ -0,0 +1,17 @@ +pr: 106486 +summary: Create custom parser for ISO-8601 datetimes +area: Infra/Core +type: enhancement +issues: + - 102063 +highlight: + title: New custom parser for ISO-8601 datetimes + body: |- + This introduces a new custom parser for ISO-8601 datetimes, for the `iso8601`, `strict_date_optional_time`, and + `strict_date_optional_time_nanos` built-in date formats. 
This provides a performance improvement over the + default Java date-time parsing. Whilst it maintains much of the same behaviour, + the new parser does not accept nonsensical date-time strings that have multiple fractional seconds fields + or multiple timezone specifiers. If the new parser fails to parse a string, it will then use the previous parser + to parse it. If a large proportion of the input data consists of these invalid strings, this may cause + a small performance degradation. If you wish to force the use of the old parsers regardless, + set the JVM property `es.datetime.java_time_parsers=true` on all ES nodes. diff --git a/docs/changelog/107886.yaml b/docs/changelog/107886.yaml new file mode 100644 index 0000000000000..a328bc2a2a208 --- /dev/null +++ b/docs/changelog/107886.yaml @@ -0,0 +1,5 @@ +pr: 107886 +summary: Cluster state role mapper file settings service +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/108452.yaml b/docs/changelog/108452.yaml new file mode 100644 index 0000000000000..fdf531602c806 --- /dev/null +++ b/docs/changelog/108452.yaml @@ -0,0 +1,5 @@ +pr: 108452 +summary: Add the rerank task to the Elasticsearch internal inference service +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/108517.yaml b/docs/changelog/108517.yaml new file mode 100644 index 0000000000000..359c8302fdf6c --- /dev/null +++ b/docs/changelog/108517.yaml @@ -0,0 +1,6 @@ +pr: 108517 +summary: Forward `indexServiceSafe` exception to listener +area: Transform +type: bug +issues: + - 108418 diff --git a/docs/changelog/108518.yaml b/docs/changelog/108518.yaml new file mode 100644 index 0000000000000..aad823ccc89f6 --- /dev/null +++ b/docs/changelog/108518.yaml @@ -0,0 +1,5 @@ +pr: 108518 +summary: Remove leading is_ prefix from Enterprise geoip docs +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/108521.yaml b/docs/changelog/108521.yaml new file mode 100644 index 0000000000000..adc7c11a4decd --- /dev/null +++ b/docs/changelog/108521.yaml @@ -0,0 +1,6 @@ +pr: 108521 +summary: Adding override for lintian false positive on `libvec.so` +area: "Packaging" +type: bug +issues: + - 108514 diff --git a/docs/changelog/108522.yaml b/docs/changelog/108522.yaml new file mode 100644 index 0000000000000..5bc064d7995e9 --- /dev/null +++ b/docs/changelog/108522.yaml @@ -0,0 +1,5 @@ +pr: 108522 +summary: Ensure we return non-negative scores when scoring scalar dot-products +area: Vector Search +type: bug +issues: [] diff --git a/docs/changelog/108562.yaml b/docs/changelog/108562.yaml new file mode 100644 index 0000000000000..2a0047fe807fd --- /dev/null +++ b/docs/changelog/108562.yaml @@ -0,0 +1,6 @@ +pr: 108562 +summary: Add `internalClusterTest` for and fix leak in `ExpandSearchPhase` +area: Search +type: bug +issues: + - 108369 diff --git a/docs/changelog/108571.yaml b/docs/changelog/108571.yaml new file mode 100644 index 0000000000000..b863ac90d9e5f --- /dev/null +++ b/docs/changelog/108571.yaml @@ -0,0 +1,5 @@ +pr: 108571 +summary: Workaround G1 bug for JDK 22 and 22.0.1 +area: Infra/CLI +type: bug +issues: [] diff --git a/docs/internal/DistributedArchitectureGuide.md b/docs/internal/DistributedArchitectureGuide.md index 59305c6305737..732e2e7be46fa 100644 --- a/docs/internal/DistributedArchitectureGuide.md +++ b/docs/internal/DistributedArchitectureGuide.md @@ -1,6 +1,14 @@ -# Distributed Area Team Internals +# Distributed Area Internals -(Summary, brief discussion of our features) +The Distributed Area contains 
indexing and coordination systems. + +The index path stretches from the user REST command through shard routing down to each individual shard's translog and storage +engine. Reindexing is effectively reading from a source index and writing to a destination index (perhaps on different nodes). +The coordination side includes cluster coordination, shard allocation, cluster autoscaling stats, task management, and cross +cluster replication. Less obvious coordination systems include networking, the discovery plugin system, the snapshot/restore +logic, and shard recovery. + +A guide to the general Elasticsearch components can be found [here](https://github.com/elastic/elasticsearch/blob/main/docs/internal/GeneralArchitectureGuide.md). # Networking @@ -237,9 +245,101 @@ works in parallel with the storage engine.) # Autoscaling -(Reactive and proactive autoscaling. Explain that we surface recommendations, how control plane uses it.) - -(Sketch / list the different deciders that we have, and then also how we use information from each to make a recommendation.) +The Autoscaling API in ES (Elasticsearch) uses cluster and node level statistics to provide a recommendation +for a cluster size to support the current cluster data and active workloads. ES Autoscaling is paired +with an ES Cloud service that periodically polls the ES elected master node for suggested cluster +changes. The cloud service will add more resources to the cluster based on Elasticsearch's recommendation. +Elasticsearch by itself cannot automatically scale. + +Autoscaling recommendations are tailored for the user [based on user defined policies][], composed of data +roles (hot, frozen, etc.) and [deciders][]. There's a public [webinar on autoscaling][], as well as the +public [Autoscaling APIs] docs. + +Autoscaling's current implementation is based primarily on storage requirements, as well as memory capacity +for ML and the frozen tier. It does not yet support scaling related to search load. Paired with ES Cloud, +autoscaling only scales upward, not downward, except for ML nodes that do get scaled up _and_ down. + +[based on user defined policies]: https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-autoscaling.html +[deciders]: https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-deciders.html +[webinar on autoscaling]: https://www.elastic.co/webinars/autoscaling-from-zero-to-production-seamlessly +[Autoscaling APIs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/autoscaling-apis.html + +### Plugin REST and TransportAction entrypoints + +Autoscaling is a [plugin][]. All the REST APIs can be found in [autoscaling/rest/][]. +`GetAutoscalingCapacityAction` is the capacity calculation operation REST endpoint, as opposed to the +other REST commands that get/set/delete the policies guiding the capacity calculation. The Transport +Actions can be found in [autoscaling/action/], where [TransportGetAutoscalingCapacityAction][] is the +entrypoint on the master node for calculating the optimal cluster resources based on the autoscaling +policies.
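
To make the flow concrete, here is a minimal, hypothetical sketch of the handler-to-transport-action pattern described above. The class and method names are illustrative stand-ins, not the real `GetAutoscalingCapacityAction` / `TransportGetAutoscalingCapacityAction` signatures: the REST layer only parses the request and delegates, while the calculation always runs on the elected master node.

```java
// Illustrative sketch only; simplified stand-ins for the real plugin classes.
import java.util.Map;
import java.util.function.Consumer;

record CapacityRequest() {}
record CapacityResponse(Map<String, Long> requiredBytesPerPolicy) {}

interface MasterNodeTransport {
    // Routes the request to the elected master node and runs the handler there.
    void executeOnMaster(CapacityRequest request, Consumer<CapacityResponse> listener);
}

final class RestGetCapacityHandler {
    private final MasterNodeTransport transport;

    RestGetCapacityHandler(MasterNodeTransport transport) {
        this.transport = transport;
    }

    // The REST layer parses the HTTP request and hands off; the capacity
    // calculation itself happens in the transport action on the master node.
    void handleGetCapacity(Consumer<String> channel) {
        transport.executeOnMaster(
            new CapacityRequest(),
            response -> channel.accept(response.requiredBytesPerPolicy().toString())
        );
    }
}
```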
+ +[plugin]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java#L72 +[autoscaling/rest/]: https://github.com/elastic/elasticsearch/tree/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/rest +[autoscaling/action/]: https://github.com/elastic/elasticsearch/tree/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action +[TransportGetAutoscalingCapacityAction]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityAction.java#L82-L98 + +### How cluster capacity is determined + +[AutoscalingMetadata][] implements [Metadata.Custom][] in order to persist autoscaling policies. Each +Decider is an implementation of [AutoscalingDeciderService][]. The [AutoscalingCalculateCapacityService][] +is responsible for running the calculation. + +[TransportGetAutoscalingCapacityAction.computeCapacity] is the entry point to [AutoscalingCalculateCapacityService.calculate], +which creates an [AutoscalingDeciderResults][] for [each autoscaling policy][]. [AutoscalingDeciderResults.toXContent][] then +determines the [maximum required capacity][] to return to the caller. [AutoscalingCapacity][] is the base unit of a cluster +resource recommendation. + +The `TransportGetAutoscalingCapacityAction` response is cached, since the operation is expensive, to prevent concurrent +callers from overloading the system. `TransportGetAutoscalingCapacityAction` contains +a [CapacityResponseCache][]. `TransportGetAutoscalingCapacityAction.masterOperation` +calls [through the CapacityResponseCache][], into the `AutoscalingCalculateCapacityService`, to handle +concurrent callers.
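
The deduplication behaviour can be sketched with a small single-flight cache: concurrent callers share the one in-flight computation rather than each starting the expensive calculation. This is a simplified sketch under that assumption only; the real `CapacityResponseCache` has additional concerns (cancellation, staleness) that are omitted here.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Single-flight cache: concurrent callers share one in-flight computation
// instead of each triggering the expensive calculation themselves.
final class SingleFlightCache<T> {
    private final Supplier<T> expensiveComputation;
    private final AtomicReference<CompletableFuture<T>> inFlight = new AtomicReference<>();

    SingleFlightCache(Supplier<T> expensiveComputation) {
        this.expensiveComputation = expensiveComputation;
    }

    CompletableFuture<T> get() {
        while (true) {
            CompletableFuture<T> existing = inFlight.get();
            if (existing != null) {
                return existing; // piggyback on the computation already running
            }
            CompletableFuture<T> fresh = new CompletableFuture<>();
            if (inFlight.compareAndSet(null, fresh)) {
                CompletableFuture.runAsync(() -> {
                    try {
                        T value = expensiveComputation.get();
                        inFlight.set(null); // the next caller triggers a recomputation
                        fresh.complete(value);
                    } catch (RuntimeException e) {
                        inFlight.set(null);
                        fresh.completeExceptionally(e);
                    }
                });
                return fresh;
            }
            // Lost the race: loop and piggyback on the winner's future.
        }
    }
}
```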
+ +[AutoscalingMetadata]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java#L38 +[Metadata.Custom]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java#L141-L145 +[AutoscalingDeciderService]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderService.java#L16-L19 +[AutoscalingCalculateCapacityService]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java#L43 + +[TransportGetAutoscalingCapacityAction.computeCapacity]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityAction.java#L102-L108 +[AutoscalingCalculateCapacityService.calculate]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java#L108-L139 +[AutoscalingDeciderResults]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResults.java#L34-L38 +[each autoscaling policy]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java#L124-L131 +[AutoscalingDeciderResults.toXContent]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResults.java#L78 +[maximum required capacity]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResults.java#L105-L116 +[AutoscalingCapacity]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCapacity.java#L27-L35 + +[CapacityResponseCache]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityAction.java#L44-L47 +[through the CapacityResponseCache]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingCapacityAction.java#L97 + +### Where the data comes from + +The Deciders each pull data from different sources as needed to inform their decisions. The +[DiskThresholdMonitor][] is one such data source. The Monitor runs on the master node and maintains +lists of nodes that exceed various disk size thresholds. [DiskThresholdSettings][] contains the +threshold settings with which the `DiskThresholdMonitor` runs. 
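
As an illustration of the kind of bookkeeping involved, the sketch below classifies nodes against the documented default watermarks (low 85%, high 90%, flood stage 95%). It is a toy model: the real `DiskThresholdMonitor` reads configurable thresholds from `DiskThresholdSettings` and also handles relative vs. absolute values, headroom, and releasing blocks when usage drops.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Toy watermark classification in the spirit of DiskThresholdMonitor.
final class DiskWatermarkCheck {
    record NodeDisk(String nodeId, long totalBytes, long usedBytes) {
        double usedFraction() {
            return (double) usedBytes / totalBytes;
        }
    }

    static Map<String, List<String>> classify(List<NodeDisk> nodes) {
        List<String> overLow = new ArrayList<>();
        List<String> overHigh = new ArrayList<>();
        List<String> overFlood = new ArrayList<>();
        for (NodeDisk node : nodes) {
            double used = node.usedFraction();
            if (used >= 0.95) {
                overFlood.add(node.nodeId());     // writes to affected indices are blocked
            } else if (used >= 0.90) {
                overHigh.add(node.nodeId());      // shards are relocated off the node
            } else if (used >= 0.85) {
                overLow.add(node.nodeId());       // no new shards allocated to the node
            }
        }
        return Map.of("low", overLow, "high", overHigh, "flood_stage", overFlood);
    }
}
```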
+ +[DiskThresholdMonitor]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java#L53-L58 +[DiskThresholdSettings]: https://github.com/elastic/elasticsearch/blob/v8.13.2/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdSettings.java#L24-L27 + +### Deciders + +The `ReactiveStorageDeciderService` tracks information that demonstrates storage limitations are causing +problems in the cluster. It uses [an algorithm defined here][]. Some examples are +- information from the `DiskThresholdMonitor` to find out whether nodes are exceeding their storage capacity +- number of unassigned shards that failed allocation because of insufficient storage +- the max shard size and minimum node size, and whether these can be satisfied with the existing infrastructure + +[an algorithm defined here]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java#L158-L176 + +The `ProactiveStorageDeciderService` maintains a forecast window that [defaults to 30 minutes][]. It only +runs on data streams (ILM, rollover, etc), not regular indexes. It looks at past [index changes][] that +took place within the forecast window to [predict][] resources that will be needed shortly. + +[defaults to 30 minutes]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageDeciderService.java#L32 +[index changes]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageDeciderService.java#L79-L83 +[predict]: https://github.com/elastic/elasticsearch/blob/v8.13.2/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageDeciderService.java#L85-L95 + +There are several more Decider Services, implementing the `AutoscalingDeciderService` interface. # Snapshot / Restore diff --git a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc index 410bec7ac38ac..6d06e7e6b9045 100644 --- a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc +++ b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc @@ -30,13 +30,13 @@ Returns information about all stored connector sync jobs ordered by their creati (Optional, integer) The offset from the first result to fetch. Defaults to `0`. `status`:: -(Optional, job status) The job status the fetched sync jobs need to have. +(Optional, job status) A comma-separated list of job statuses to filter the results. Available statuses include: `canceling`, `canceled`, `completed`, `error`, `in_progress`, `pending`, `suspended`. `connector_id`:: (Optional, string) The connector id the fetched sync jobs need to have. `job_type`:: -(Optional, job type) A comma-separated list of job types. +(Optional, job type) A comma-separated list of job types. Available job types are: `full`, `incremental` and `access_control`. 
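
For instance, both filters can be combined in one request. This is an illustrative example only, assuming the `GET _connector/_sync_job` request path that this API documents:

[source,console]
----
GET _connector/_sync_job?status=pending,in_progress&job_type=full,incremental
----
// TEST[skip:illustrative example only]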
[[list-connector-sync-jobs-api-example]] ==== {api-examples-title} diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 47c3529ceef40..c96f0c7342a96 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -602,7 +602,7 @@ stream's oldest backing index. // TESTRESPONSE[s/"index_uuid": "_eEfRrFHS9OyhqWntkgHAQ"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.07-000001"/"index_name": $body.data_streams.0.indices.0.index_name/] // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-2099.03.08-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] <1> First item in the `indices` array for `my-data-stream`. This item contains information about the stream's oldest backing index, diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index 5e0c09f9d2be2..8f6b39d2aa0dd 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -389,7 +389,7 @@ This returns: // TESTRESPONSE[s/"ltOJGmqgTVm4T-Buoe7Acg"/$body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"2023-07-26T09:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.start/] // TESTRESPONSE[s/"2023-07-26T13:26:42.000Z"/$body.data_streams.0.time_series.temporal_ranges.0.end/] -// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] <1> The backing index for this data stream. Before a backing index can be downsampled, the TSDS needs to be rolled over and diff --git a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc index 6bfa9ad9b00c5..b89f55dd41575 100644 --- a/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc +++ b/docs/reference/data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc @@ -147,7 +147,7 @@ and that the next generation index will also be managed by {ilm-init}: // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] <1> The name of the backing index. 
<2> For each backing index we display the value of the <> @@ -284,7 +284,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "xCEhwsp8Tey0-FLNFYVwSg"/"index_uuid": $body.data_streams.0.indices.0.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000002"/"index_name": $body.data_streams.0.indices.1.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] <1> The existing backing index will continue to be managed by {ilm-init} <2> The existing backing index will continue to be managed by {ilm-init} @@ -364,7 +364,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] <1> The backing indices that existed before rollover will continue to be managed by {ilm-init} <2> The backing indices that existed before rollover will continue to be managed by {ilm-init} @@ -462,7 +462,7 @@ GET _data_stream/dsl-data-stream // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8DJ5gw"/"index_uuid": $body.data_streams.0.indices.1.index_uuid/] // TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/] // TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/] -// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/] +// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] <1> The write index is now managed by {ilm-init} <2> The `lifecycle` configured on the data stream is now disabled. 
<3> The next write index will be managed by {ilm-init} diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 240a33164b379..0a318cd135914 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -358,4 +358,4 @@ The API returns the following response: // TESTRESPONSE[s/"index_name": ".ds-my-data-stream-two-2099.03.08-000001"/"index_name": $body.data_streams.1.indices.0.index_name/] // TESTRESPONSE[s/"index_uuid": "3liBu2SYS5axasRt6fUIpA"/"index_uuid": $body.data_streams.1.indices.0.index_uuid/] // TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW"/] -// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_indices":[],"failure_store":false/] +// TESTRESPONSE[s/"replicated": false/"replicated": false,"failure_store":{"enabled": false, "indices": [], "rollover_on_write": false}/] diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 5d6ede6acd5ac..6272f4529c5f9 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -11,6 +11,8 @@ independently of each other. TIP: When ingesting key-value pairs with a large, arbitrary set of keys, you might consider modeling each key-value pair as its own nested document with `key` and `value` fields. Instead, consider using the <> data type, which maps an entire object as a single field and allows for simple searches over its contents. Nested documents and queries are typically expensive, so using the `flattened` data type for this use case is a better option. +WARNING: Nested fields have incomplete support in Kibana. While they are visible and searchable in Discover, they cannot be used to build visualizations in Lens. + [[nested-arrays-flattening-objects]] ==== How arrays of objects are flattened diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index 4f41c0b54bb1d..75f1d7c799187 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -50,6 +50,9 @@ privilege or action. `cluster`:: (list) A list of cluster privileges. These privileges define the cluster level actions that users with this role are able to execute. +`description`:: (string) A description of the role. +The maximum length is `1000` chars. + `global`:: (object) An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. 
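
As an illustration, a role granting such a global application-management privilege might look like the following. This is a hypothetical example: the role name and application pattern are invented, and only the documented `global.application.manage.applications` structure is assumed.

[source,console]
--------------------------------------------------
POST /_security/role/my_app_admin_role
{
  "description": "Grants management of privileges for my-app applications.",
  "global": {
    "application": {
      "manage": {
        "applications": [ "my-app-*" ]
      }
    }
  }
}
--------------------------------------------------
// TEST[skip:hypothetical example]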
@@ -104,6 +107,7 @@ The following example adds a role called `my_admin_role`: -------------------------------------------------- POST /_security/role/my_admin_role { + "description": "Grants full access to all management features within the cluster.", "cluster": ["all"], "indices": [ { diff --git a/docs/reference/rest-api/security/get-roles.asciidoc b/docs/reference/rest-api/security/get-roles.asciidoc index 80f0fd587aae8..3eb5a735194c6 100644 --- a/docs/reference/rest-api/security/get-roles.asciidoc +++ b/docs/reference/rest-api/security/get-roles.asciidoc @@ -61,6 +61,7 @@ GET /_security/role/my_admin_role -------------------------------------------------- { "my_admin_role": { + "description": "Grants full access to all management features within the cluster.", "cluster" : [ "all" ], "indices" : [ { diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index 9153b5fbdcab3..be30db4d100bd 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -85,6 +85,9 @@ All {Ilm} operations related to managing policies. `manage_index_templates`:: All operations on index templates. +`manage_inference`:: +All operations related to managing {infer}. + `manage_ingest_pipelines`:: All operations on ingest pipelines. @@ -192,6 +195,9 @@ node info, node and cluster stats, and pending cluster tasks. `monitor_enrich`:: All read-only operations related to managing and executing enrich policies. +`monitor_inference`:: +All read-only operations related to {infer}. + `monitor_ml`:: All read-only {ml} operations, such as getting information about {dfeeds}, jobs, model snapshots, or results. diff --git a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java index 9b452219bd635..5231bb8e3c67f 100644 --- a/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java +++ b/libs/vec/src/main21/java/org/elasticsearch/vec/internal/Int7DotProduct.java @@ -47,10 +47,11 @@ public float score(int firstOrd, int secondOrd) throws IOException { if (firstSeg != null && secondSeg != null) { int dotProduct = dotProduct7u(firstSeg, secondSeg, length); + assert dotProduct >= 0; float adjustedDistance = dotProduct * scoreCorrectionConstant + firstOffset + secondOffset; - return (1 + adjustedDistance) / 2; + return Math.max((1 + adjustedDistance) / 2, 0f); } else { - return fallbackScore(firstByteOffset, secondByteOffset); + return Math.max(fallbackScore(firstByteOffset, secondByteOffset), 0f); } } } diff --git a/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java b/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java index 246ddaeb2ebcf..07d30a887c683 100644 --- a/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java +++ b/libs/vec/src/test/java/org/elasticsearch/vec/VectorScorerFactoryTests.java @@ -28,6 +28,7 @@ import static org.elasticsearch.vec.VectorSimilarityType.EUCLIDEAN; import static org.elasticsearch.vec.VectorSimilarityType.MAXIMUM_INNER_PRODUCT; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; // @com.carrotsearch.randomizedtesting.annotations.Repeat(iterations = 100) public class VectorScorerFactoryTests extends AbstractVectorTestCase { @@ -96,6 +97,51 @@ void testSimpleImpl(long maxChunkSize) throws IOException { } } + public void 
testNonNegativeDotProduct() throws IOException { + assumeTrue(notSupportedMsg(), supported()); + var factory = AbstractVectorTestCase.factory.get(); + + try (Directory dir = new MMapDirectory(createTempDir(getTestName()), MMapDirectory.DEFAULT_MAX_CHUNK_SIZE)) { + // keep vecs `0` so dot product is `0` + byte[] vec1 = new byte[32]; + byte[] vec2 = new byte[32]; + String fileName = getTestName() + "-32"; + try (IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT)) { + var negativeOffset = floatToByteArray(-5f); + byte[] bytes = concat(vec1, negativeOffset, vec2, negativeOffset); + out.writeBytes(bytes, 0, bytes.length); + } + try (IndexInput in = dir.openInput(fileName, IOContext.DEFAULT)) { + // dot product + float expected = 0f; // TODO fix in Lucene: https://github.com/apache/lucene/pull/13356 luceneScore(DOT_PRODUCT, vec1, vec2, + // 1, -5, -5); + var scorer = factory.getInt7ScalarQuantizedVectorScorer(32, 2, 1, DOT_PRODUCT, in).get(); + assertThat(scorer.score(0, 1), equalTo(expected)); + assertThat(scorer.score(0, 1), greaterThanOrEqualTo(0f)); + assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(0).score(1), equalTo(expected)); + // max inner product + expected = luceneScore(MAXIMUM_INNER_PRODUCT, vec1, vec2, 1, -5, -5); + scorer = factory.getInt7ScalarQuantizedVectorScorer(32, 2, 1, MAXIMUM_INNER_PRODUCT, in).get(); + assertThat(scorer.score(0, 1), greaterThanOrEqualTo(0f)); + assertThat(scorer.score(0, 1), equalTo(expected)); + assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(0).score(1), equalTo(expected)); + // cosine + expected = 0f; // TODO fix in Lucene: https://github.com/apache/lucene/pull/13356 luceneScore(COSINE, vec1, vec2, 1, -5, + // -5); + scorer = factory.getInt7ScalarQuantizedVectorScorer(32, 2, 1, COSINE, in).get(); + assertThat(scorer.score(0, 1), equalTo(expected)); + assertThat(scorer.score(0, 1), greaterThanOrEqualTo(0f)); + assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(0).score(1), equalTo(expected)); + // euclidean + expected = luceneScore(EUCLIDEAN, vec1, vec2, 1, -5, -5); + scorer = factory.getInt7ScalarQuantizedVectorScorer(32, 2, 1, EUCLIDEAN, in).get(); + assertThat(scorer.score(0, 1), equalTo(expected)); + assertThat(scorer.score(0, 1), greaterThanOrEqualTo(0f)); + assertThat((new VectorScorerSupplierAdapter(scorer)).scorer(0).score(1), equalTo(expected)); + } + } + } + public void testRandom() throws IOException { assumeTrue(notSupportedMsg(), supported()); testRandom(MMapDirectory.DEFAULT_MAX_CHUNK_SIZE, BYTE_ARRAY_RANDOM_INT7_FUNC); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index cf4eaab763011..2b1a8e1c0e318 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -1779,7 +1779,14 @@ public void testRemoveGhostReference() throws Exception { public ClusterState execute(ClusterState currentState) throws Exception { DataStream original = currentState.getMetadata().dataStreams().get(dataStreamName); DataStream broken = original.copy() - .setIndices(List.of(new Index(original.getIndices().get(0).getName(), "broken"), original.getIndices().get(1))) + .setBackingIndices( + original.getBackingIndices() + .copy() + .setIndices( + List.of(new Index(original.getIndices().get(0).getName(), 
"broken"), original.getIndices().get(1)) + ) + .build() + ) .build(); brokenDataStreamHolder.set(broken); return ClusterState.builder(currentState) diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java index 1d8de6b9ac5f6..27cd5697fd0f7 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/FailureStoreQueryParamIT.java @@ -58,7 +58,7 @@ public void setup() throws IOException { assertThat(dataStreams.size(), is(1)); Map dataStream = (Map) dataStreams.get(0); assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME)); - List backingIndices = getBackingIndices(dataStream); + List backingIndices = getIndices(dataStream); assertThat(backingIndices.size(), is(1)); List failureStore = getFailureStore(dataStream); assertThat(failureStore.size(), is(1)); @@ -199,18 +199,16 @@ public void testPutIndexMappingApi() throws IOException { } } - private List getBackingIndices(Map response) { - return getIndices(response, "indices"); - } - + @SuppressWarnings("unchecked") private List getFailureStore(Map response) { - return getIndices(response, "failure_indices"); + var failureStore = (Map) response.get("failure_store"); + return getIndices(failureStore); } @SuppressWarnings("unchecked") - private List getIndices(Map response, String fieldName) { - List> indices = (List>) response.get(fieldName); + private List getIndices(Map response) { + List> indices = (List>) response.get("indices"); return indices.stream().map(index -> index.get("index_name")).toList(); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java index 721630d29b4c9..464a11ce8a062 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java @@ -37,7 +37,7 @@ public Set getFeatures() { DataStreamLifecycleHealthInfoPublisher.DSL_HEALTH_INFO_FEATURE, // Added in 8.12 LazyRolloverAction.DATA_STREAM_LAZY_ROLLOVER, // Added in 8.13 DataStreamAutoShardingService.DATA_STREAM_AUTO_SHARDING_FEATURE, - DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14 + DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14 ); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index 88e529ec5569b..f5fa0db839230 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettingProvider; @@ -56,11 +57,11 @@ public class DataStreamIndexSettingsProvider implements IndexSettingProvider { @Override public Settings 
getAdditionalIndexSettings( String indexName, - String dataStreamName, - boolean timeSeries, + @Nullable String dataStreamName, + boolean isTimeSeries, Metadata metadata, Instant resolvedAt, - Settings allSettings, + Settings indexTemplateAndCreateRequestSettings, List combinedTemplateMappings ) { if (dataStreamName != null) { @@ -70,13 +71,13 @@ public Settings getAdditionalIndexSettings( // so checking that index_mode==null|standard and templateIndexMode == TIME_SERIES boolean migrating = dataStream != null && (dataStream.getIndexMode() == null || dataStream.getIndexMode() == IndexMode.STANDARD) - && timeSeries; + && isTimeSeries; IndexMode indexMode; if (migrating) { indexMode = IndexMode.TIME_SERIES; } else if (dataStream != null) { - indexMode = timeSeries ? dataStream.getIndexMode() : null; - } else if (timeSeries) { + indexMode = isTimeSeries ? dataStream.getIndexMode() : null; + } else if (isTimeSeries) { indexMode = IndexMode.TIME_SERIES; } else { indexMode = null; @@ -84,8 +85,8 @@ public Settings getAdditionalIndexSettings( if (indexMode != null) { if (indexMode == IndexMode.TIME_SERIES) { Settings.Builder builder = Settings.builder(); - TimeValue lookAheadTime = DataStreamsPlugin.getLookAheadTime(allSettings); - TimeValue lookBackTime = DataStreamsPlugin.LOOK_BACK_TIME.get(allSettings); + TimeValue lookAheadTime = DataStreamsPlugin.getLookAheadTime(indexTemplateAndCreateRequestSettings); + TimeValue lookBackTime = DataStreamsPlugin.LOOK_BACK_TIME.get(indexTemplateAndCreateRequestSettings); final Instant start; final Instant end; if (dataStream == null || migrating) { @@ -114,9 +115,13 @@ public Settings getAdditionalIndexSettings( builder.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), FORMATTER.format(start)); builder.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), FORMATTER.format(end)); - if (allSettings.hasValue(IndexMetadata.INDEX_ROUTING_PATH.getKey()) == false + if (indexTemplateAndCreateRequestSettings.hasValue(IndexMetadata.INDEX_ROUTING_PATH.getKey()) == false && combinedTemplateMappings.isEmpty() == false) { - List routingPaths = findRoutingPaths(indexName, allSettings, combinedTemplateMappings); + List routingPaths = findRoutingPaths( + indexName, + indexTemplateAndCreateRequestSettings, + combinedTemplateMappings + ); if (routingPaths.isEmpty() == false) { builder.putList(INDEX_ROUTING_PATH.getKey(), routingPaths); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java index c3e8331b856fd..a614a2dc40e25 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java @@ -155,7 +155,7 @@ static ClusterState removeDataStream( DataStream dataStream = currentState.metadata().dataStreams().get(dataStreamName); assert dataStream != null; backingIndicesToRemove.addAll(dataStream.getIndices()); - backingIndicesToRemove.addAll(dataStream.getFailureIndices()); + backingIndicesToRemove.addAll(dataStream.getFailureIndices().getIndices()); } // first delete the data streams and then the indices: diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java index 
f7064eb39a015..8017b1c72f862 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportAction.java @@ -145,8 +145,8 @@ static GetDataStreamAction.Response innerOperation( Map backingIndicesSettingsValues = new HashMap<>(); Metadata metadata = state.getMetadata(); collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getIndices()); - if (DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.getFailureIndices().isEmpty() == false) { - collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices()); + if (DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.getFailureIndices().getIndices().isEmpty() == false) { + collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices().getIndices()); } GetDataStreamAction.Response.TimeSeries timeSeries = null; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java index a6060923bd396..e3cdd6a8c14d9 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamGlobalRetentionAction.java @@ -64,7 +64,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(dryRun); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public boolean dryRun() { return dryRun; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java index 3fe9ae0758a91..3bd100a106dd6 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/DeleteDataStreamLifecycleAction.java @@ -48,6 +48,7 @@ public void writeTo(StreamOutput out) throws IOException { } public Request(String[] names) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.names = names; } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java index 51eb9e7e7e944..5816823ed710a 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamGlobalRetentionAction.java @@ -47,7 +47,9 @@ private GetDataStreamGlobalRetentionAction() {/* no instances */} public static final class Request extends MasterNodeReadRequest { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java index a30af402a9186..cc61c7fe664be 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -43,7 +43,9 @@ public Request(StreamInput in) throws IOException { super(in); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } @Override public ActionRequestValidationException validate() { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java index 2aa5b4b4d3acd..65ca34a99da23 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamGlobalRetentionAction.java @@ -108,6 +108,7 @@ public void writeTo(StreamOutput out) throws IOException { } public Request(@Nullable TimeValue defaultRetention, @Nullable TimeValue maxRetention) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.globalRetention = new DataStreamGlobalRetention(defaultRetention, maxRetention); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index 66133e9fbe0f2..4b0eaa6c46baf 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -139,7 +139,9 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() { List.of(new Tuple<>(start.minus(4, ChronoUnit.HOURS), start), new Tuple<>(start, end)) ).getMetadata(); DataStream d = metadata.dataStreams().get(dataStreamName); - metadata = Metadata.builder(metadata).put(d.copy().setReplicated(true).setRolloverOnWrite(false).build()).build(); + metadata = Metadata.builder(metadata) + .put(d.copy().setReplicated(true).setBackingIndices(d.getBackingIndices().copy().setRolloverOnWrite(false).build()).build()) + .build(); now = now.plus(1, ChronoUnit.HOURS); ClusterState in = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index ec6e624794a03..4059127b5eb85 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -82,7 +82,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti .setIndexMode(IndexMode.STANDARD) .setLifecycle(new DataStreamLifecycle()) .setFailureStoreEnabled(true) - 
.setFailureIndices(failureStores) + .setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()) .build(); String ilmPolicyName = "rollover-30days"; @@ -159,9 +159,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti ); if (DataStream.isFailureStoreFeatureFlagEnabled()) { - List failureStoresRepresentation = (List) dataStreamMap.get( - DataStream.FAILURE_INDICES_FIELD.getPreferredName() - ); + var failureStore = (Map) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName()); + List failureStoresRepresentation = (List) failureStore.get(DataStream.INDICES_FIELD.getPreferredName()); Map failureStoreRepresentation = (Map) failureStoresRepresentation.get(0); assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); @@ -185,7 +184,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti .setIndexMode(IndexMode.STANDARD) .setLifecycle(new DataStreamLifecycle(null, null, false)) .setFailureStoreEnabled(true) - .setFailureIndices(failureStores) + .setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()) .build(); String ilmPolicyName = "rollover-30days"; @@ -251,9 +250,8 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti ); if (DataStream.isFailureStoreFeatureFlagEnabled()) { - List failureStoresRepresentation = (List) dataStreamMap.get( - DataStream.FAILURE_INDICES_FIELD.getPreferredName() - ); + var failureStore = (Map) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName()); + List failureStoresRepresentation = (List) failureStore.get(DataStream.INDICES_FIELD.getPreferredName()); Map failureStoreRepresentation = (Map) failureStoresRepresentation.get(0); assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName())); assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false)); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionServiceTests.java index b9dc6d349873c..41d00d063955d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionServiceTests.java @@ -261,7 +261,7 @@ private static DataStream newDataStreamInstance( .setReplicated(replicated) .setLifecycle(lifecycle) .setFailureStoreEnabled(failureStores.isEmpty() == false) - .setFailureIndices(failureStores); + .setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()); if (randomBoolean()) { builder.setSystem(true); builder.setHidden(true); diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index a7ec537823827..20485402b07ae 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -210,8 +210,8 @@ setup: --- "Create data stream with 
failure store": - requires: - cluster_features: ["gte_v8.11.0"] - reason: "data stream failure stores only creatable in 8.11+" + cluster_features: ["gte_v8.15.0"] + reason: "data stream failure stores REST structure changed in 8.15+" - do: allowed_warnings: @@ -248,9 +248,9 @@ setup: - match: { data_streams.0.status: 'GREEN' } - match: { data_streams.0.template: 'my-template4' } - match: { data_streams.0.hidden: false } - - match: { data_streams.0.failure_store: true } - - length: { data_streams.0.failure_indices: 1 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/'} + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/'} - match: { data_streams.1.name: failure-data-stream2 } - match: { data_streams.1.timestamp_field.name: '@timestamp' } @@ -259,15 +259,15 @@ setup: - match: { data_streams.1.indices.0.index_name: '/\.ds-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { data_streams.1.template: 'my-template4' } - match: { data_streams.1.hidden: false } - - match: { data_streams.1.failure_store: true } - - length: { data_streams.1.failure_indices: 1 } - - match: { data_streams.1.failure_indices.0.index_name: '/\.fs-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.1.failure_store.enabled: true } + - length: { data_streams.1.failure_store.indices: 1 } + - match: { data_streams.1.failure_store.indices.0.index_name: '/\.fs-failure-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } # save the backing index names for later use - set: { data_streams.0.indices.0.index_name: idx0name } - - set: { data_streams.0.failure_indices.0.index_name: fsidx0name } + - set: { data_streams.0.failure_store.indices.0.index_name: fsidx0name } - set: { data_streams.1.indices.0.index_name: idx1name } - - set: { data_streams.1.failure_indices.0.index_name: fsidx1name } + - set: { data_streams.1.failure_store.indices.0.index_name: fsidx1name } - do: indices.get_mapping: @@ -538,8 +538,8 @@ setup: --- "Delete data stream with failure stores": - requires: - cluster_features: ["gte_v8.12.0"] - reason: "data stream failure stores only supported in 8.12+" + cluster_features: ["gte_v8.15.0"] + reason: "data stream failure stores REST structure changed in 8.15+" - do: allowed_warnings: @@ -570,7 +570,7 @@ setup: name: failure-data-stream1 - set: { data_streams.0.indices.0.index_name: idx0name } - - set: { data_streams.0.failure_indices.0.index_name: fs0name } + - set: { data_streams.0.failure_store.indices.0.index_name: fs0name } - do: indices.get: @@ -586,8 +586,8 @@ setup: - match: { data_streams.0.generation: 1 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - length: { data_streams.0.failure_indices: 1 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - do: indices.delete_data_stream: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml 
b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml index 8c0e27373664d..a3baa524259b8 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/170_modify_data_stream.yml @@ -92,8 +92,8 @@ --- "Modify a data stream's failure store": - requires: - cluster_features: [ "gte_v8.14.0" ] - reason: "this API was released in 8.14.0" + cluster_features: ["gte_v8.15.0"] + reason: "data stream failure stores REST structure changed in 8.15+" test_runner_features: [ "allowed_warnings" ] - do: @@ -128,14 +128,14 @@ indices.get_data_stream: name: data-stream-for-modification - set: { data_streams.0.indices.0.index_name: write_index } - - set: { data_streams.0.failure_indices.0.index_name: first_failure_index } - - set: { data_streams.0.failure_indices.1.index_name: write_failure_index } + - set: { data_streams.0.failure_store.indices.0.index_name: first_failure_index } + - set: { data_streams.0.failure_store.indices.1.index_name: write_failure_index } - do: indices.get_data_stream: name: data-stream-for-modification2 - set: { data_streams.0.indices.0.index_name: second_write_index } - - set: { data_streams.0.failure_indices.0.index_name: second_write_failure_index } + - set: { data_streams.0.failure_store.indices.0.index_name: second_write_failure_index } - do: index: @@ -170,11 +170,11 @@ - match: { data_streams.0.timestamp_field.name: '@timestamp' } - match: { data_streams.0.generation: 3 } - length: { data_streams.0.indices: 1 } - - length: { data_streams.0.failure_indices: 3 } + - length: { data_streams.0.failure_store.indices: 3 } - match: { data_streams.0.indices.0.index_name: $write_index } - - match: { data_streams.0.failure_indices.0.index_name: 'test_index1' } - - match: { data_streams.0.failure_indices.1.index_name: $first_failure_index } - - match: { data_streams.0.failure_indices.2.index_name: $write_failure_index } + - match: { data_streams.0.failure_store.indices.0.index_name: 'test_index1' } + - match: { data_streams.0.failure_store.indices.1.index_name: $first_failure_index } + - match: { data_streams.0.failure_store.indices.2.index_name: $write_failure_index } # An index that has an alias is not allowed to be added to failure store - do: @@ -269,10 +269,10 @@ - match: { data_streams.0.timestamp_field.name: '@timestamp' } - match: { data_streams.0.generation: 4 } - length: { data_streams.0.indices: 1 } - - length: { data_streams.0.failure_indices: 2 } + - length: { data_streams.0.failure_store.indices: 2 } - match: { data_streams.0.indices.0.index_name: $write_index } - - match: { data_streams.0.failure_indices.0.index_name: $first_failure_index } - - match: { data_streams.0.failure_indices.1.index_name: $write_failure_index } + - match: { data_streams.0.failure_store.indices.0.index_name: $first_failure_index } + - match: { data_streams.0.failure_store.indices.1.index_name: $write_failure_index } - do: indices.delete_data_stream: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index 7268ee9bb3b56..9dce5150388d4 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ 
b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -23,8 +23,8 @@ teardown: --- "Redirect ingest failure in data stream to failure store": - requires: - cluster_features: ["gte_v8.13.0"] - reason: "data stream failure stores only redirect ingest failures in 8.13+" + cluster_features: ["gte_v8.15.0"] + reason: "data stream failure stores REST structure changed in 8.15+" test_runner_features: [allowed_warnings, contains] - do: @@ -74,9 +74,9 @@ teardown: - match: { data_streams.0.timestamp_field.name: '@timestamp' } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_store: true } - - length: { data_streams.0.failure_indices: 1 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - do: search: @@ -152,9 +152,9 @@ teardown: - match: { data_streams.0.timestamp_field.name: '@timestamp' } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_store: true } - - length: { data_streams.0.failure_indices: 1 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - do: search: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml index 0074ce425c6f9..91d23afa67af9 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/200_rollover_failure_store.yml @@ -1,8 +1,8 @@ --- setup: - requires: - cluster_features: ["gte_v8.14.0"] - reason: "data stream failure store rollover only supported in 8.14+" + cluster_features: ["gte_v8.15.0"] + reason: "data stream failure stores REST structure changed in 8.15+" test_runner_features: allowed_warnings - do: @@ -48,9 +48,9 @@ setup: - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - length: { data_streams.0.failure_indices: 2 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - length: { data_streams.0.failure_store.indices: 2 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } --- "Roll over a data stream's failure 
store with conditions": @@ -86,9 +86,9 @@ setup: - match: { data_streams.0.generation: 2 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - length: { data_streams.0.failure_indices: 2 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - length: { data_streams.0.failure_store.indices: 2 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.indices.1.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000002/' } --- "Don't roll over a data stream's failure store when conditions aren't met": @@ -112,5 +112,5 @@ setup: - match: { data_streams.0.generation: 1 } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - length: { data_streams.0.failure_indices: 1 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-data-stream-for-rollover-(\d{4}\.\d{2}\.\d{2}-)?000001/' } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml index 32338fea056ae..3ab22e6271c6d 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/30_auto_create_data_stream.yml @@ -50,8 +50,8 @@ --- "Put index template with failure store": - requires: - cluster_features: ["gte_v8.11.0"] - reason: "data stream failure stores only creatable in 8.11+" + cluster_features: ["gte_v8.15.0"] + reason: "data stream failure stores REST structure changed in 8.15+" test_runner_features: allowed_warnings - do: @@ -91,9 +91,9 @@ - match: { data_streams.0.timestamp_field.name: '@timestamp' } - length: { data_streams.0.indices: 1 } - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - - match: { data_streams.0.failure_store: true } - - length: { data_streams.0.failure_indices: 1 } - - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store.enabled: true } + - length: { data_streams.0.failure_store.indices: 1 } + - match: { data_streams.0.failure_store.indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - do: indices.delete_data_stream: diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 2e50cc0a97677..6898e44335793 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -483,22 +483,22 @@ private Map retrieveEnterpriseGeoData(GeoIpDatabase geoIpDatabas 
} } case HOSTING_PROVIDER -> { - geoData.put("is_hosting_provider", isHostingProvider); + geoData.put("hosting_provider", isHostingProvider); } case TOR_EXIT_NODE -> { - geoData.put("is_tor_exit_node", isTorExitNode); + geoData.put("tor_exit_node", isTorExitNode); } case ANONYMOUS_VPN -> { - geoData.put("is_anonymous_vpn", isAnonymousVpn); + geoData.put("anonymous_vpn", isAnonymousVpn); } case ANONYMOUS -> { - geoData.put("is_anonymous", isAnonymous); + geoData.put("anonymous", isAnonymous); } case PUBLIC_PROXY -> { - geoData.put("is_public_proxy", isPublicProxy); + geoData.put("public_proxy", isPublicProxy); } case RESIDENTIAL_PROXY -> { - geoData.put("is_residential_proxy", isResidentialProxy); + geoData.put("residential_proxy", isResidentialProxy); } } } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index f9f79d54522da..ec77cacbdb6b6 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -373,12 +373,12 @@ public void testEnterprise() throws Exception { location.put("lon", -1.25); assertThat(geoData.get("location"), equalTo(location)); assertThat(geoData.get("network"), equalTo("2.125.160.216/29")); - assertThat(geoData.get("is_hosting_provider"), equalTo(false)); - assertThat(geoData.get("is_tor_exit_node"), equalTo(false)); - assertThat(geoData.get("is_anonymous_vpn"), equalTo(false)); - assertThat(geoData.get("is_anonymous"), equalTo(false)); - assertThat(geoData.get("is_public_proxy"), equalTo(false)); - assertThat(geoData.get("is_residential_proxy"), equalTo(false)); + assertThat(geoData.get("hosting_provider"), equalTo(false)); + assertThat(geoData.get("tor_exit_node"), equalTo(false)); + assertThat(geoData.get("anonymous_vpn"), equalTo(false)); + assertThat(geoData.get("anonymous"), equalTo(false)); + assertThat(geoData.get("public_proxy"), equalTo(false)); + assertThat(geoData.get("residential_proxy"), equalTo(false)); } public void testAddressIsNotInTheDatabase() throws Exception { diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java index 317bfa9edd1c9..275666eec5c42 100644 --- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java +++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java @@ -9,30 +9,66 @@ package org.elasticsearch.kibana; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.SystemIndexThreadPoolTestCase; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Collection; import java.util.List; import java.util.Locale; import java.util.Map; import 
java.util.Set; +import java.util.concurrent.Phaser; import java.util.stream.Stream; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; -public class KibanaThreadPoolIT extends SystemIndexThreadPoolTestCase { +/** + * Tests to verify that system indices bypass user-space thread pools. + * + * We can block thread pools by setting them to one thread and a one-element queue, then submitting + * tasks that wait on a phaser. This lets us verify that operations on system indices + * are being directed to other thread pools.
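 * <p>Editor's sketch of the two-barrier Phaser handshake this test relies on below. It is an
 * illustration only, not part of the patch; {@code executor} and {@code poolSize} stand in for a
 * pool's {@code ExecutorService} and its maximum thread count:
 * <pre>{@code
 * Phaser phaser = new Phaser();
 * phaser.register();                       // the test thread is one party
 * phaser.bulkRegister(poolSize);           // one party per pool thread to park
 * for (int i = 0; i < poolSize; i++) {
 *     executor.execute(() -> {
 *         phaser.arriveAndAwaitAdvance();  // phase 0: signal "running on a pool thread"
 *         phaser.arriveAndAwaitAdvance();  // phase 1: wait for the test to finish
 *     });
 * }
 * phaser.arriveAndAwaitAdvance();          // phase 0: every pool thread is now parked
 * try {
 *     // run assertions while the pools are saturated
 * } finally {
 *     phaser.arriveAndAwaitAdvance();      // phase 1: release the parked threads
 * }
 * }</pre>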
+ */ +public class KibanaThreadPoolIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(IndexingPressure.MAX_INDEXING_BYTES.getKey(), "1KB") + .put("thread_pool.search.size", 1) + .put("thread_pool.search.queue_size", 1) + .put("thread_pool.write.size", 1) + .put("thread_pool.write.queue_size", 1) + .put("thread_pool.get.size", 1) + .put("thread_pool.get.queue_size", 1) + .build(); + } + + private static final String USER_INDEX = "user_index"; + // For system indices that use ExecutorNames.CRITICAL_SYSTEM_INDEX_THREAD_POOLS, we'll want to + // block normal system index thread pools as well. + private static final Set THREAD_POOLS_TO_BLOCK = Set.of(ThreadPool.Names.GET, ThreadPool.Names.WRITE, ThreadPool.Names.SEARCH); @Override protected Collection> nodePlugins() { return Set.of(KibanaPlugin.class); } - public void testKibanaThreadPool() { + public void testKibanaThreadPoolByPassesBlockedThreadPools() throws Exception { List kibanaSystemIndices = Stream.of( KibanaPlugin.KIBANA_INDEX_DESCRIPTOR.getIndexPattern(), KibanaPlugin.REPORTING_INDEX_DESCRIPTOR.getIndexPattern(), @@ -61,4 +97,108 @@ public void testKibanaThreadPool() { } }); } + + public void testBlockedThreadPoolsRejectUserRequests() throws Exception { + assertAcked(client().admin().indices().prepareCreate(USER_INDEX)); + + runWithBlockedThreadPools(this::assertThreadPoolsBlocked); + + assertAcked(client().admin().indices().prepareDelete(USER_INDEX)); + } + + private void assertThreadPoolsBlocked() { + + var e1 = expectThrows( + EsRejectedExecutionException.class, + () -> client().prepareIndex(USER_INDEX).setSource(Map.of("foo", "bar")).get() + ); + assertThat(e1.getMessage(), startsWith("rejected execution of TimedRunnable")); + var e2 = expectThrows(EsRejectedExecutionException.class, () -> client().prepareGet(USER_INDEX, "id").get()); + assertThat(e2.getMessage(), startsWith("rejected execution of ActionRunnable")); + var e3 = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(USER_INDEX) + .setQuery(QueryBuilders.matchAllQuery()) + // Request times out if max concurrent shard requests is set to 1 + .setMaxConcurrentShardRequests(usually() ? 
SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) + .get() + ); + assertThat(e3.getMessage(), containsString("all shards failed")); + } + + protected void runWithBlockedThreadPools(Runnable runnable) throws Exception { + Phaser phaser = new Phaser(); + + // register this test's thread + phaser.register(); + + blockThreadPool(phaser); + phaser.arriveAndAwaitAdvance(); // wait until all waitActions are executing + + fillQueues(); + + logger.debug("number of nodes " + internalCluster().getNodeNames().length); + logger.debug("number of parties arrived " + phaser.getArrivedParties()); + try { + runnable.run(); + } finally { + phaser.arriveAndAwaitAdvance(); // release all waitActions + } + } + + private void blockThreadPool(Phaser phaser) { + for (String nodeName : internalCluster().getNodeNames()) { + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeName); + for (String threadPoolName : THREAD_POOLS_TO_BLOCK) { + blockThreadPool(threadPoolName, threadPool, phaser); + } + } + } + + private void fillQueues() { + for (String nodeName : internalCluster().getNodeNames()) { + ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeName); + for (String threadPoolName : THREAD_POOLS_TO_BLOCK) { + fillThreadPoolQueues(threadPoolName, threadPool); + } + } + } + + private static void blockThreadPool(String threadPoolName, ThreadPool threadPool, Phaser phaser) { + ThreadPool.Info info = threadPool.info(threadPoolName); + + Runnable waitAction = () -> { + phaser.arriveAndAwaitAdvance(); // block until all waitActions are executing on a thread pool + phaser.arriveAndAwaitAdvance(); // block until the main thread has finished running the test + }; + + phaser.bulkRegister(info.getMax()); + + for (int i = 0; i < info.getMax(); i++) { + // we need to make sure that a task is blocking each thread pool thread, + // otherwise the queue might end up having a free spot + do { + try { + threadPool.executor(threadPoolName).execute(waitAction); + break; + } catch (EsRejectedExecutionException e) { + // if an exception was thrown on submission, retry.
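// Editor's note: the retry matters because, on a busy test cluster, an unrelated task can
// win the race for the pool's only thread or queue slot. Looping until this submission is
// accepted guarantees that a phaser-parked waitAction, and not some transient task, is what
// ends up occupying the pool.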
+ } + } while (true); + } + } + + private static void fillThreadPoolQueues(String threadPoolName, ThreadPool threadPool) { + ThreadPool.Info info = threadPool.info(threadPoolName); + + for (int i = 0; i < info.getQueueSize().singles(); i++) { + try { + threadPool.executor(threadPoolName).execute(() -> {}); + } catch (EsRejectedExecutionException e) { + // we can't be sure that some other task won't get queued in a test cluster + // but the threadpool's thread is already blocked + } + } + } + } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index d0cef178dc920..aee0d313e4e00 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.MockLogAppender; @@ -24,16 +23,14 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { private MockLogAppender appender; - private Releasable appenderRelease; public void setUp() throws Exception { super.setUp(); - appender = new MockLogAppender(); - appenderRelease = appender.capturing(ESLoggingHandler.class, TransportLogger.class, TcpTransport.class); + appender = MockLogAppender.capture(ESLoggingHandler.class, TransportLogger.class, TcpTransport.class); } public void tearDown() throws Exception { - appenderRelease.close(); + appender.close(); super.tearDown(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 7ce962ff56b67..3035213766584 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -40,6 +40,7 @@ import org.elasticsearch.transport.netty4.NettyAllocator; import java.io.Closeable; +import java.io.IOException; import java.net.SocketAddress; import java.net.SocketException; import java.nio.charset.StandardCharsets; @@ -203,7 +204,11 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { - if (cause instanceof PrematureChannelClosureException || cause instanceof SocketException) { + if (cause instanceof PrematureChannelClosureException + || cause instanceof SocketException + || (cause instanceof IOException + && cause.getMessage() != null + && cause.getMessage().contains("An established connection was aborted by the software in your host machine"))) { // no more requests coming, so fast-forward the latch fastForward(); } else { diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 08e3ac2cbce8c..99b2728ebfa3c 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ 
b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -206,16 +206,16 @@ private void assertControllerSpawns(final Function pluginsDir String stdoutLoggerName = "test_plugin-controller-stdout"; String stderrLoggerName = "test_plugin-controller-stderr"; - MockLogAppender appender = new MockLogAppender(); Loggers.setLevel(LogManager.getLogger(stdoutLoggerName), Level.TRACE); Loggers.setLevel(LogManager.getLogger(stderrLoggerName), Level.TRACE); CountDownLatch messagesLoggedLatch = new CountDownLatch(2); - if (expectSpawn) { - appender.addExpectation(new ExpectedStreamMessage(stdoutLoggerName, "I am alive", messagesLoggedLatch)); - appender.addExpectation(new ExpectedStreamMessage(stderrLoggerName, "I am an error", messagesLoggedLatch)); - } - try (var ignore = appender.capturing(stdoutLoggerName, stderrLoggerName)) { + try (var appender = MockLogAppender.capture(stdoutLoggerName, stderrLoggerName)) { + if (expectSpawn) { + appender.addExpectation(new ExpectedStreamMessage(stdoutLoggerName, "I am alive", messagesLoggedLatch)); + appender.addExpectation(new ExpectedStreamMessage(stderrLoggerName, "I am an error", messagesLoggedLatch)); + } + Spawner spawner = new Spawner(); spawner.spawnNativeControllers(environment); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index dc4e24959a5c6..81ac8ab1200f6 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -1211,7 +1211,6 @@ private List listPlugins() { /** * Check that readiness listener works */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108523") public void test500Readiness() throws Exception { assertFalse(readinessProbe(9399)); // Disabling security so we wait for green @@ -1220,6 +1219,7 @@ public void test500Readiness() throws Exception { builder().envVar("readiness.port", "9399").envVar("xpack.security.enabled", "false").envVar("discovery.type", "single-node") ); waitForElasticsearch(installation); + dumpDebug(); assertTrue(readinessProbe(9399)); } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java index eaf439f264ad5..d04c8802635d3 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java @@ -72,7 +72,7 @@ public void testDanglingIndicesCanBeListed() throws Exception { internalCluster().startNodes(3, buildSettings(0)); final DanglingIndexDetails danglingIndexDetails = createDanglingIndices(INDEX_NAME); - final String stoppedNodeId = mapNodeNameToId(danglingIndexDetails.stoppedNodeName); + final String stoppedNodeId = getNodeId(danglingIndexDetails.stoppedNodeName); final RestClient restClient = getRestClient(); @@ -163,7 +163,12 @@ public void testDanglingIndicesCanBeDeleted() throws Exception { // tombstone has been pushed out of the graveyard. createIndex("additional"); deleteIndex("additional"); - assertThat(listDanglingIndexIds(), is(empty())); + // reading dangling index metadata happens without holding all shard locks + // (as we do not know the index name from the index directory structure).
+ // As a result, the index directory could be updated or deleted in the meantime by a concurrent operation, + // and the resulting node-level failure is propagated to the API call. + // Since the dangling indices API is best-effort, we expect such failures to be retried at the client level. + assertBusy(() -> assertThat(listDanglingIndexIds(), is(empty()))); } private List listDanglingIndexIds() throws IOException { @@ -171,15 +176,14 @@ private List listDanglingIndexIds() throws IOException { assertOK(response); final XContentTestUtils.JsonMapView mapView = createJsonMapView(response.getEntity().getContent()); + logger.warn("dangling API response: {}", mapView); assertThat(mapView.get("_nodes.total"), equalTo(3)); assertThat(mapView.get("_nodes.successful"), equalTo(3)); assertThat(mapView.get("_nodes.failed"), equalTo(0)); List indices = mapView.get("dangling_indices"); - List danglingIndexIds = new ArrayList<>(); - for (int i = 0; i < indices.size(); i++) { danglingIndexIds.add(mapView.get("dangling_indices." + i + ".index_uuid")); } @@ -187,23 +191,6 @@ return danglingIndexIds; } - /** - * Given a node name, finds the corresponding node ID. - */ - private String mapNodeNameToId(String nodeName) throws IOException { - final Response catResponse = getRestClient().performRequest(new Request("GET", "/_cat/nodes?full_id&h=id,name")); - assertOK(catResponse); - - for (String nodeLine : Streams.readAllLines(catResponse.getEntity().getContent())) { - String[] elements = nodeLine.split(" "); - if (elements[1].equals(nodeName)) { - return elements[0]; - } - } - - throw new AssertionError("Failed to map node name [" + nodeName + "] to node ID"); - } - /** * Helper that creates one or more indices, and importantly, * checks that they are green before proceeding.
This is important diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml index ae3eadded108b..86f02641d86f1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/all_path_options.yml @@ -78,24 +78,6 @@ setup: - match: {test_index2.settings.index.refresh_interval: 10s} - is_false: foo.settings.index.refresh_interval ---- -"put settings in list of indices": - - skip: - awaits_fix: list of indices not implemented yet - - do: - indices.put_settings: - index: test_index1, test_index2 - body: - refresh_interval: 10s - - - do: - indices.get_settings: {} - - - match: {test_index1.settings.index.refresh_interval: 10s} - - match: {test_index2.settings.index.refresh_interval: 10s} - - is_false: foo.settings.index.refresh_interval - - --- "put settings in blank index": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/85_fields_meta.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/85_fields_meta.yml deleted file mode 100644 index 81be6f82d8a14..0000000000000 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/85_fields_meta.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -"Metadata Fields": - - - skip: - awaits_fix: "Update doesn't return metadata fields, waiting for #3259" - - - do: - indices.create: - index: test_1 - - - do: - update: - index: test_1 - id: "1" - parent: 5 - fields: [ _routing ] - body: - doc: { foo: baz } - upsert: { foo: bar } - - - match: { get._routing: "5" } - - - do: - get: - index: test_1 - id: "1" - parent: 5 - stored_fields: [ _routing ] - - diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 3b9d3e133b63a..fcccc0051f0cd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -387,17 +387,16 @@ public void testMessageLogging() { ) .get(); - MockLogAppender dryRunMockLog = new MockLogAppender(); - dryRunMockLog.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "no completed message logged on dry run", - TransportClusterRerouteAction.class.getName(), - Level.INFO, - "allocated an empty primary*" - ) - ); + try (var dryRunMockLog = MockLogAppender.capture(TransportClusterRerouteAction.class)) { + dryRunMockLog.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "no completed message logged on dry run", + TransportClusterRerouteAction.class.getName(), + Level.INFO, + "allocated an empty primary*" + ) + ); - try (var ignored = dryRunMockLog.capturing(TransportClusterRerouteAction.class)) { AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); ClusterRerouteResponse dryRunResponse = clusterAdmin().prepareReroute() .setExplain(randomBoolean()) @@ -412,24 +411,23 @@ public void testMessageLogging() { dryRunMockLog.assertAllExpectationsMatched(); } - MockLogAppender allocateMockLog = new MockLogAppender(); - allocateMockLog.addExpectation( - new MockLogAppender.SeenEventExpectation( - "message for first allocate 
empty primary", - TransportClusterRerouteAction.class.getName(), - Level.INFO, - "allocated an empty primary*" + nodeName1 + "*" - ) - ); - allocateMockLog.addExpectation( - new MockLogAppender.UnseenEventExpectation( - "no message for second allocate empty primary", - TransportClusterRerouteAction.class.getName(), - Level.INFO, - "allocated an empty primary*" + nodeName2 + "*" - ) - ); - try (var ignored = allocateMockLog.capturing(TransportClusterRerouteAction.class)) { + try (var allocateMockLog = MockLogAppender.capture(TransportClusterRerouteAction.class)) { + allocateMockLog.addExpectation( + new MockLogAppender.SeenEventExpectation( + "message for first allocate empty primary", + TransportClusterRerouteAction.class.getName(), + Level.INFO, + "allocated an empty primary*" + nodeName1 + "*" + ) + ); + allocateMockLog.addExpectation( + new MockLogAppender.UnseenEventExpectation( + "no message for second allocate empty primary", + TransportClusterRerouteAction.class.getName(), + Level.INFO, + "allocated an empty primary*" + nodeName2 + "*" + ) + ); AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java new file mode 100644 index 0000000000000..a12a26d69c5ff --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/CollapseSearchResultsIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.search.collapse.CollapseBuilder; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; + +public class CollapseSearchResultsIT extends ESIntegTestCase { + + public void testCollapse() { + final String indexName = "test_collapse"; + createIndex(indexName); + final String collapseField = "collapse_field"; + assertAcked(indicesAdmin().preparePutMapping(indexName).setSource(collapseField, "type=keyword")); + index(indexName, "id_1", Map.of(collapseField, "value1")); + index(indexName, "id_2", Map.of(collapseField, "value2")); + refresh(indexName); + assertNoFailuresAndResponse( + prepareSearch(indexName).setQuery(new MatchAllQueryBuilder()) + .setCollapse(new CollapseBuilder(collapseField).setInnerHits(new InnerHitBuilder("ih").setSize(2))), + searchResponse -> { + assertEquals(collapseField, searchResponse.getHits().getCollapseField()); + assertEquals(Set.of(new BytesRef("value1"), new BytesRef("value2")), Set.of(searchResponse.getHits().getCollapseValues())); + } + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 95961546f1e1f..f1232d2442c8b 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -163,7 +163,8 @@ static TransportVersion def(int id) { public static final TransportVersion SECURITY_ROLE_DESCRIPTION = def(8_654_00_0); public static final TransportVersion ML_INFERENCE_AZURE_OPENAI_COMPLETIONS = def(8_655_00_0); public static final TransportVersion JOIN_STATUS_AGE_SERIALIZATION = def(8_656_00_0); - + public static final TransportVersion ML_RERANK_DOC_OPTIONAL = def(8_657_00_0); + public static final TransportVersion FAILURE_STORE_FIELD_PARITY = def(8_658_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index e6de1faa1aff7..91561814fea1a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -49,6 +49,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest { - public DesiredBalanceRequest() {} + public DesiredBalanceRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public DesiredBalanceRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java index 75434ff554b9c..f26921fd47260 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -103,6 +103,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) public static class Request extends MasterNodeReadRequest { public Request(TaskId parentTaskId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); setParentTask(parentTaskId); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index 75877cf0630f4..82e4e4123e4fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -57,6 +57,7 @@ public AddVotingConfigExclusionsRequest(String... nodeNames) { * @param timeout How long to wait for the added exclusions to take effect and be removed from the voting configuration. 
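 * <p>Editor's note: throughout this patch, master-node request constructors pass their
 * master-node timeout to {@code super(...)} explicitly; {@code TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT}
 * marks call sites that still rely on the historical implicit default so they can be found and
 * migrated later. The {@code TimeValue} parameter of this constructor is unrelated: it bounds how
 * long to wait for the exclusions themselves. A hypothetical caller-side sketch (names illustrative,
 * not part of the patch):
 * <pre>{@code
 * AddVotingConfigExclusionsRequest request = new AddVotingConfigExclusionsRequest(
 *     Strings.EMPTY_ARRAY,                // nodeIds: none, we exclude by name here
 *     new String[] { "node-to-remove" },  // nodeNames
 *     TimeValue.timeValueSeconds(30)      // how long to wait for exclusions to take effect
 * );
 * }</pre>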
*/ public AddVotingConfigExclusionsRequest(String[] nodeIds, String[] nodeNames, TimeValue timeout) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); if (timeout.compareTo(TimeValue.ZERO) < 0) { throw new IllegalArgumentException("timeout [" + timeout + "] must be non-negative"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java index f8f64edad2974..2ddd27261db0f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java @@ -26,7 +26,9 @@ public class ClearVotingConfigExclusionsRequest extends MasterNodeRequest { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java index e6e2616e67662..46e41d306cefe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java @@ -102,7 +102,9 @@ public ClusterState afterBatchExecution(ClusterState clusterState, boolean clust } public static class Request extends AcknowledgedRequest { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index c7c2b9a290a2e..3d8cdb4b405f8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -48,6 +48,7 @@ public class UpdateDesiredNodesRequest extends AcknowledgedRequest nodes, boolean dryRun) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); assert historyID != null; assert nodes != null; this.historyID = historyID; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index a94555f1dfd1c..2b60e2d4a5ffa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -37,9 +37,12 @@ public class ClusterHealthRequest extends MasterNodeReadRequest { public GetFeatureUpgradeStatusRequest() { - super(); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); } public GetFeatureUpgradeStatusRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java index ccc4a62a1138f..36a90ae9afe33 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java @@ -20,7 +20,7 @@ public class PostFeatureUpgradeRequest extends MasterNodeRequest { public PostFeatureUpgradeRequest() { - super(); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); } public PostFeatureUpgradeRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java index a88fb83b2300d..5bde01195e35c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java @@ -34,6 +34,7 @@ public class PrevalidateNodeRemovalRequest extends MasterNodeReadRequest { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 5b49a41ed9476..c4e40f1b208b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -55,7 +55,9 @@ public ClusterUpdateSettingsRequest(StreamInput in) throws IOException { persistentSettings = readSettingsFromStream(in); } - public ClusterUpdateSettingsRequest() {} + public ClusterUpdateSettingsRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } @Override public ActionRequestValidationException validate() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 6f6253491c580..91c302c8aa7be 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -31,9 +31,12 @@ public final class ClusterSearchShardsRequest extends MasterNodeReadRequest userMetadata; - public CreateSnapshotRequest() {} + public CreateSnapshotRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } /** * Constructs a new put repository request with the provided snapshot and repository names @@ -87,6 +89,7 @@ public CreateSnapshotRequest() {} * @param snapshot snapshot name */ public CreateSnapshotRequest(String repository, String snapshot) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.snapshot = snapshot; this.repository = repository; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index b16041da66bf7..67389ea3116d8 
100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -38,6 +38,7 @@ public class DeleteSnapshotRequest extends MasterNodeRequest private boolean includeIndexNames = true; - public GetSnapshotsRequest() {} + public GetSnapshotsRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } /** * Constructs a new get snapshots request with given repository names and list of snapshots @@ -85,6 +87,7 @@ public GetSnapshotsRequest() {} * @param snapshots list of snapshots */ public GetSnapshotsRequest(String[] repositories, String[] snapshots) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.repositories = repositories; this.snapshots = snapshots; } @@ -95,6 +98,7 @@ public GetSnapshotsRequest(String[] repositories, String[] snapshots) { * @param repositories repository names */ public GetSnapshotsRequest(String... repositories) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.repositories = repositories; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java index d8fd55451cc63..7a7cc0c304556 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java @@ -29,6 +29,7 @@ public class GetShardSnapshotRequest extends MasterNodeRequest repositories, ShardId shardId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); assert repositories.isEmpty() == false; assert repositories.stream().noneMatch(Objects::isNull); assert repositories.size() == 1 || repositories.stream().noneMatch(repo -> repo.equals(ALL_REPOSITORIES)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 8d025653d47fe..73339cedb96e0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -60,7 +60,9 @@ public class RestoreSnapshotRequest extends MasterNodeRequest { - public PendingClusterTasksRequest() {} + public PendingClusterTasksRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public PendingClusterTasksRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index fac2006b68814..f223d7fb2762f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -65,7 +65,9 @@ public IndicesAliasesRequest(StreamInput in) throws IOException { origin = in.readOptionalString(); } - public IndicesAliasesRequest() {} + public IndicesAliasesRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } /** * Request to take one or more actions on 
one or more indexes and alias combinations. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index 9d10065c9c3e9..09071f2e6ea3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -34,11 +34,14 @@ public class GetAliasesRequest extends MasterNodeReadRequest private String[] originalAliases = Strings.EMPTY_ARRAY; public GetAliasesRequest(String... aliases) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.aliases = aliases; this.originalAliases = aliases; } - public GetAliasesRequest() {} + public GetAliasesRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } /** * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until we no diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java index 9427a5fa363ba..9a722f1bce2a9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequest.java @@ -37,12 +37,15 @@ public CloseIndexRequest(StreamInput in) throws IOException { waitForActiveShards = ActiveShardCount.readFrom(in); } - public CloseIndexRequest() {} + public CloseIndexRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } /** * Constructs a new close index request for the specified index. */ public CloseIndexRequest(String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 9cd7d713a3a4c..094fccbc35182 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -266,9 +266,9 @@ ClusterState execute( final var dataStream = clusterState.metadata().dataStreams().get(request.index()); final var backingIndexName = dataStream.getIndices().get(0).getName(); - final var indexNames = dataStream.getFailureIndices().isEmpty() + final var indexNames = dataStream.getFailureIndices().getIndices().isEmpty() ? 
List.of(backingIndexName) - : List.of(backingIndexName, dataStream.getFailureIndices().get(0).getName()); + : List.of(backingIndexName, dataStream.getFailureIndices().getIndices().get(0).getName()); taskContext.success(getAckListener(indexNames, allocationActionMultiListener)); successfulRequests.put(request, indexNames); return clusterState; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 21f187f052580..3a78738ae986a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -111,7 +111,9 @@ public CreateIndexRequest(StreamInput in) throws IOException { } } - public CreateIndexRequest() {} + public CreateIndexRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } /** * Constructs a request to create an index. @@ -129,6 +131,7 @@ public CreateIndexRequest(String index) { * @param settings the settings to apply to the index */ public CreateIndexRequest(String index, Settings settings) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.index = index; this.settings = settings; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java index b8206cba8de2a..daceeece4f97b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java @@ -30,7 +30,7 @@ public DeleteDanglingIndexRequest(StreamInput in) throws IOException { } public DeleteDanglingIndexRequest(String indexUUID, boolean acceptDataLoss) { - super(); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indexUUID = Objects.requireNonNull(indexUUID, "indexUUID cannot be null"); this.acceptDataLoss = acceptDataLoss; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java index 66378ab9907d8..be2fb10821662 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java @@ -32,7 +32,7 @@ public ImportDanglingIndexRequest(StreamInput in) throws IOException { } public ImportDanglingIndexRequest(String indexUUID, boolean acceptDataLoss) { - super(); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indexUUID = Objects.requireNonNull(indexUUID, "indexUUID cannot be null"); this.acceptDataLoss = acceptDataLoss; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index 87cfc303a289a..2cb431577242d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -48,7 +48,9 @@ public DeleteIndexRequest(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); } - public DeleteIndexRequest() {} + public DeleteIndexRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } /** * Constructs a new delete index request for the specified index. @@ -56,6 +58,7 @@ public DeleteIndexRequest() {} * @param index The index to delete. Use "_all" to delete all indices. */ public DeleteIndexRequest(String index) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = new String[] { index }; } @@ -65,6 +68,7 @@ public DeleteIndexRequest(String index) { * @param indices The indices to delete. Use "_all" to delete all indices. */ public DeleteIndexRequest(String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index edc6381438635..707286801cf66 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -108,13 +108,16 @@ public PutMappingRequest(StreamInput in) throws IOException { writeIndexOnly = in.readBoolean(); } - public PutMappingRequest() {} + public PutMappingRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } /** * Constructs a new put mapping request against one or more indices. If nothing is set then * it will be executed against all indices. */ public PutMappingRequest(String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java index fb0745eb72d1f..4bb4578f24459 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java @@ -39,12 +39,15 @@ public OpenIndexRequest(StreamInput in) throws IOException { waitForActiveShards = ActiveShardCount.readFrom(in); } - public OpenIndexRequest() {} + public OpenIndexRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } /** * Constructs a new open index request for the specified index. */ public OpenIndexRequest(String... 
indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequest.java index caf33a541e92a..9331d7010a6e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockRequest.java @@ -43,6 +43,7 @@ public AddIndexBlockRequest(StreamInput in) throws IOException { * Constructs a new request for the specified block and indices */ public AddIndexBlockRequest(APIBlock block, String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.block = Objects.requireNonNull(block); this.indices = Objects.requireNonNull(indices); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 4284d860d85c0..ed3721b35f3b4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -196,12 +196,11 @@ public static NameResolution resolveRolloverNames( final IndexAbstraction indexAbstraction = currentState.metadata().getIndicesLookup().get(rolloverTarget); return switch (indexAbstraction.getType()) { case ALIAS -> resolveAliasRolloverNames(currentState.metadata(), indexAbstraction, newIndexName); - case DATA_STREAM -> { - if (isFailureStoreRollover) { - yield resolveDataStreamFailureStoreRolloverNames(currentState.metadata(), (DataStream) indexAbstraction); - } - yield resolveDataStreamRolloverNames(currentState.getMetadata(), (DataStream) indexAbstraction); - } + case DATA_STREAM -> resolveDataStreamRolloverNames( + currentState.metadata(), + (DataStream) indexAbstraction, + isFailureStoreRollover + ); default -> // the validate method above prevents this case throw new IllegalStateException("unable to roll over type [" + indexAbstraction.getType().getDisplayName() + "]"); @@ -220,19 +219,15 @@ private static NameResolution resolveAliasRolloverNames(Metadata metadata, Index return new NameResolution(sourceIndexName, unresolvedName, rolloverIndexName); } - private static NameResolution resolveDataStreamRolloverNames(Metadata metadata, DataStream dataStream) { - final IndexMetadata originalWriteIndex = metadata.index(dataStream.getWriteIndex()); - return new NameResolution(originalWriteIndex.getIndex().getName(), null, dataStream.nextWriteIndexAndGeneration(metadata).v1()); - } - - private static NameResolution resolveDataStreamFailureStoreRolloverNames(Metadata metadata, DataStream dataStream) { - assert dataStream.getFailureStoreWriteIndex() != null : "Unable to roll over failure store with no failure store indices"; + private static NameResolution resolveDataStreamRolloverNames(Metadata metadata, DataStream dataStream, boolean isFailureStoreRollover) { + final DataStream.DataStreamIndices dataStreamIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); + assert dataStreamIndices.getWriteIndex() != null : "Unable to roll over dataStreamIndices with no indices"; - final IndexMetadata originalWriteIndex = metadata.index(dataStream.getFailureStoreWriteIndex()); + final 
IndexMetadata originalWriteIndex = metadata.index(dataStreamIndices.getWriteIndex()); return new NameResolution( originalWriteIndex.getIndex().getName(), null, - dataStream.nextFailureStoreWriteIndexAndGeneration(metadata).v1() + dataStream.nextWriteIndexAndGeneration(metadata, dataStreamIndices).v1() ); } @@ -327,10 +322,9 @@ private RolloverResult rolloverDataStream( templateV2 = systemDataStreamDescriptor.getComposableIndexTemplate(); } - final Index originalWriteIndex = isFailureStoreRollover ? dataStream.getFailureStoreWriteIndex() : dataStream.getWriteIndex(); - final Tuple nextIndexAndGeneration = isFailureStoreRollover - ? dataStream.nextFailureStoreWriteIndexAndGeneration(currentState.metadata()) - : dataStream.nextWriteIndexAndGeneration(currentState.metadata()); + final DataStream.DataStreamIndices dataStreamIndices = dataStream.getDataStreamIndices(isFailureStoreRollover); + final Index originalWriteIndex = dataStreamIndices.getWriteIndex(); + final Tuple nextIndexAndGeneration = dataStream.nextWriteIndexAndGeneration(metadata, dataStreamIndices); final String newWriteIndexName = nextIndexAndGeneration.v1(); final long newGeneration = nextIndexAndGeneration.v2(); MetadataCreateIndexService.validateIndexName(newWriteIndexName, currentState); // fails if the index already exists @@ -438,7 +432,7 @@ yield new DataStreamAutoShardingEvent( metadataBuilder = withShardSizeForecastForWriteIndex(dataStreamName, metadataBuilder); newState = ClusterState.builder(newState).metadata(metadataBuilder).build(); - newState = MetadataDataStreamsService.setRolloverOnWrite(newState, dataStreamName, false); + newState = MetadataDataStreamsService.setRolloverOnWrite(newState, dataStreamName, false, isFailureStoreRollover); return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 1f582f95aea91..09f9411d5a834 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -120,9 +120,12 @@ public RolloverRequest(StreamInput in) throws IOException { } } - RolloverRequest() {} + RolloverRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public RolloverRequest(String rolloverTarget, String newIndexName) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.rolloverTarget = rolloverTarget; this.newIndexName = newIndexName; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index b9ab28dc80e65..abf42cffdaa01 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -169,12 +169,13 @@ protected void masterOperation( assert task instanceof CancellableTask; Metadata metadata = clusterState.metadata(); // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be. 
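Note on the refactor above: the separate backing-index and failure-store name-resolution paths collapse into a single path parameterised by index family, and the call site that follows selects the family with a single flag. A minimal sketch of the resulting pattern, using only accessors visible in this diff; the helper method and variable names are illustrative, not part of the change:

```java
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;

// Hypothetical helper: resolve the next write index name for either index family.
static String resolveNextWriteIndexName(DataStream dataStream, Metadata metadata, boolean failureStore) {
    // One flag selects backing indices or failure-store indices; the same
    // rollover logic then applies to whichever family was selected.
    DataStream.DataStreamIndices family = dataStream.getDataStreamIndices(failureStore);
    Index currentWriteIndex = family.getWriteIndex(); // the index being rolled over
    Tuple<String, Long> next = dataStream.nextWriteIndexAndGeneration(metadata, family);
    return next.v1(); // next.v2() carries the bumped generation
}
```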
+ boolean targetFailureStore = rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices(); final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, rolloverRequest.getRolloverTarget(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices() + targetFailureStore ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); @@ -200,6 +201,7 @@ protected void masterOperation( metadataDataStreamsService.setRolloverOnWrite( rolloverRequest.getRolloverTarget(), true, + targetFailureStore, rolloverRequest.ackTimeout(), rolloverRequest.masterNodeTimeout(), listener.map( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java index 96cbfc80c8d67..42ff256579984 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -51,7 +51,9 @@ public GetSettingsRequest includeDefaults(boolean includeDefaults) { return this; } - public GetSettingsRequest() {} + public GetSettingsRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public GetSettingsRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 7fa2e11317a43..c3e87f2f54cf0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -61,12 +61,15 @@ public UpdateSettingsRequest(StreamInput in) throws IOException { } } - public UpdateSettingsRequest() {} + public UpdateSettingsRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } /** * Constructs a new request to update settings for one or more indices */ public UpdateSettingsRequest(String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; } @@ -74,6 +77,7 @@ public UpdateSettingsRequest(String... indices) { * Constructs a new request to update settings for one or more indices */ public UpdateSettingsRequest(Settings settings, String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; this.settings = settings; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java index 475c9c16f149e..8cf2427e91c15 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -40,10 +40,13 @@ public class IndicesShardStoresRequest extends MasterNodeReadRequestindices */ public IndicesShardStoresRequest(String... 
indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.indices = indices; } - public IndicesShardStoresRequest() {} + public IndicesShardStoresRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public IndicesShardStoresRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index c39d2e1114618..ef709fc4457a7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -73,9 +73,12 @@ public ResizeRequest(StreamInput in) throws IOException { } } - ResizeRequest() {} + ResizeRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public ResizeRequest(String targetIndex, String sourceIndex) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.targetIndexRequest = new CreateIndexRequest(targetIndex); this.sourceIndex = sourceIndex; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java index b3f3a0a203df5..3c2416200ce61 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java @@ -28,12 +28,15 @@ public DeleteIndexTemplateRequest(StreamInput in) throws IOException { name = in.readString(); } - public DeleteIndexTemplateRequest() {} + public DeleteIndexTemplateRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } /** * Constructs a new delete index request for the specified name. */ public DeleteIndexTemplateRequest(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java index 593162305f2d0..9ac10d782a605 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java @@ -109,6 +109,7 @@ public Request(StreamInput in) throws IOException { * Constructs a new delete index request for the specified name. */ public Request(String... 
names) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = Objects.requireNonNull(names, "component templates to delete must not be null"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java index f884c8404d0f2..fa40a901c705b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java @@ -108,6 +108,7 @@ public Request(StreamInput in) throws IOException { * Constructs a new delete template request for the specified name. */ public Request(String... names) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = Objects.requireNonNull(names, "templates to delete must not be null"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 3d5b4a73e0a57..5483097b140da 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -48,9 +48,12 @@ public static class Request extends MasterNodeReadRequest { private String name; private boolean includeDefaults; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.name = name; this.includeDefaults = false; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index aebb9cef12f43..5cb35d23c8b7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -49,6 +49,7 @@ public static class Request extends MasterNodeReadRequest { * @param name A template name or pattern, or {@code null} to retrieve all templates. 
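Note: the same mechanical change repeats across all of these request classes: each constructor that previously relied on the implicit 30-second default now passes it to the superclass explicitly. A hedged sketch of the convention as it would appear in a new read-only request; the class is invented for illustration, while the superclass and constant come from this diff:

```java
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;

// Hypothetical read-only master-node request following the new convention.
public class GetWidgetTemplateRequest extends MasterNodeReadRequest<GetWidgetTemplateRequest> {
    private final String name;

    public GetWidgetTemplateRequest(String name) {
        // Still the trappy 30s value, but now explicit: greppable, deprecated,
        // and removable once every caller supplies a real timeout.
        super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
        this.name = name;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null; // nothing to validate in this sketch
    }
}
```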
*/ public Request(@Nullable String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); if (name != null && name.contains(",")) { throw new IllegalArgumentException("template name may not contain ','"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java index ec7ce037e651c..19c89b0186733 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java @@ -25,6 +25,7 @@ public class GetIndexTemplatesRequest extends MasterNodeReadRequest { private TransportPutComposableIndexTemplateAction.Request indexTemplateRequest; private boolean includeDefaults = false; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(String templateName) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); if (templateName == null) { throw new IllegalArgumentException("template name cannot be null"); } @@ -53,6 +56,7 @@ public Request(String templateName) { } public Request(TransportPutComposableIndexTemplateAction.Request indexTemplateRequest) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); if (indexTemplateRequest == null) { throw new IllegalArgumentException("index template body must be present"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutComponentTemplateAction.java index 56e7079ec38ba..ebf1e9e74b793 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutComponentTemplateAction.java @@ -56,6 +56,7 @@ public Request(StreamInput in) throws IOException { * Constructs a new put component template request with the provided name. */ public Request(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 10c9a5e7205b0..6ef887847c270 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -92,12 +92,15 @@ public PutIndexTemplateRequest(StreamInput in) throws IOException { version = in.readOptionalVInt(); } - public PutIndexTemplateRequest() {} + public PutIndexTemplateRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } /** * Constructs a new put index template request with the provided name. 
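Note: acknowledged (write-style) requests get the two-argument form of the same treatment, threading both the master-node timeout and the acknowledgement timeout through to the superclass. Again a hedged sketch with an invented class name; the constants are the ones used throughout this diff:

```java
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;

// Hypothetical write-style request: both defaults are now spelled out.
public class PutWidgetTemplateRequest extends AcknowledgedRequest<PutWidgetTemplateRequest> {
    private final String name;

    public PutWidgetTemplateRequest(String name) {
        super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
        this.name = name;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null; // nothing to validate in this sketch
    }
}
```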
*/ public PutIndexTemplateRequest(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index 8d259083a1352..86c6109469477 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -156,6 +156,7 @@ public Request(StreamInput in) throws IOException { * Constructs a new put index template request with the provided name. */ public Request(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java index f9e559fa16ec7..40060d5e5d927 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java @@ -36,11 +36,13 @@ public static class Request extends AcknowledgedRequest implements Indi private final long startTime; public Request(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.name = name; this.startTime = System.currentTimeMillis(); } public Request(String name, long startTime) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.name = name; this.startTime = startTime; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index b68a7d3fcd159..5b79eae0cebfd 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -47,6 +47,7 @@ public static class Request extends MasterNodeRequest implements Indice private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); public Request(String... 
names) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = Objects.requireNonNull(names); this.wildcardExpressionsOriginallySpecified = Arrays.stream(names).anyMatch(Regex::isSimpleMatchPattern); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 1517b368e21ea..841a2df5eada6 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -58,10 +58,12 @@ public static class Request extends MasterNodeReadRequest implements In private boolean includeDefaults = false; public Request(String[] names) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = names; } public Request(String[] names, boolean includeDefaults) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = names; this.includeDefaults = includeDefaults; } @@ -187,6 +189,7 @@ public static class DataStreamInfo implements SimpleDiffable, To public static final ParseField TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS = new ParseField( "time_since_last_auto_shard_event_millis" ); + public static final ParseField FAILURE_STORE_ENABLED = new ParseField("enabled"); private final DataStream dataStream; private final ClusterHealthStatus dataStreamStatus; @@ -220,7 +223,7 @@ public DataStreamInfo( @SuppressWarnings("unchecked") DataStreamInfo(StreamInput in) throws IOException { this( - new DataStream(in), + DataStream.read(in), ClusterHealthStatus.readFrom(in), in.readOptionalString(), in.readOptionalString(), @@ -298,45 +301,8 @@ public XContentBuilder toXContent( .field(DataStream.NAME_FIELD.getPreferredName(), DataStream.TIMESTAMP_FIELD_NAME) .endObject(); - builder.field(DataStream.INDICES_FIELD.getPreferredName()); - if (dataStream.getIndices() == null) { - builder.nullValue(); - } else { - builder.startArray(); - for (Index index : dataStream.getIndices()) { - builder.startObject(); - index.toXContentFragment(builder); - IndexProperties indexProperties = indexSettingsValues.get(index); - if (indexProperties != null) { - builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm()); - if (indexProperties.ilmPolicyName() != null) { - builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName()); - } - builder.field(MANAGED_BY.getPreferredName(), indexProperties.managedBy.displayValue); - } - builder.endObject(); - } - builder.endArray(); - } + indicesToXContent(builder, dataStream.getIndices()); builder.field(DataStream.GENERATION_FIELD.getPreferredName(), dataStream.getGeneration()); - if (DataStream.isFailureStoreFeatureFlagEnabled()) { - builder.field(DataStream.FAILURE_INDICES_FIELD.getPreferredName()); - builder.startArray(); - for (Index failureStore : dataStream.getFailureIndices()) { - builder.startObject(); - failureStore.toXContentFragment(builder); - IndexProperties indexProperties = indexSettingsValues.get(failureStore); - if (indexProperties != null) { - builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm()); - if (indexProperties.ilmPolicyName() != null) { - builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName()); - } - builder.field(MANAGED_BY.getPreferredName(), indexProperties.managedBy.displayValue); - } - builder.endObject(); - } - builder.endArray(); - } if (dataStream.getMetadata() != null) { 
builder.field(DataStream.METADATA_FIELD.getPreferredName(), dataStream.getMetadata()); } @@ -359,20 +325,7 @@ public XContentBuilder toXContent( builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), dataStream.isAllowCustomRouting()); builder.field(REPLICATED.getPreferredName(), dataStream.isReplicated()); builder.field(ROLLOVER_ON_WRITE.getPreferredName(), dataStream.rolloverOnWrite()); - if (DataStream.isFailureStoreFeatureFlagEnabled()) { - builder.field(DataStream.FAILURE_STORE_FIELD.getPreferredName(), dataStream.isFailureStoreEnabled()); - } - if (dataStream.getAutoShardingEvent() != null) { - DataStreamAutoShardingEvent autoShardingEvent = dataStream.getAutoShardingEvent(); - builder.startObject(AUTO_SHARDING_FIELD.getPreferredName()); - autoShardingEvent.toXContent(builder, params); - builder.humanReadableField( - TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS.getPreferredName(), - TIME_SINCE_LAST_AUTO_SHARD_EVENT.getPreferredName(), - autoShardingEvent.getTimeSinceLastAutoShardingEvent(System::currentTimeMillis) - ); - builder.endObject(); - } + addAutoShardingEvent(builder, params, dataStream.getAutoShardingEvent()); if (timeSeries != null) { builder.startObject(TIME_SERIES.getPreferredName()); builder.startArray(TEMPORAL_RANGES.getPreferredName()); @@ -387,10 +340,56 @@ public XContentBuilder toXContent( builder.endArray(); builder.endObject(); } + if (DataStream.isFailureStoreFeatureFlagEnabled()) { + builder.startObject(DataStream.FAILURE_STORE_FIELD.getPreferredName()); + builder.field(FAILURE_STORE_ENABLED.getPreferredName(), dataStream.isFailureStoreEnabled()); + builder.field( + DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(), + dataStream.getFailureIndices().isRolloverOnWrite() + ); + indicesToXContent(builder, dataStream.getFailureIndices().getIndices()); + addAutoShardingEvent(builder, params, dataStream.getFailureIndices().getAutoShardingEvent()); + builder.endObject(); + } builder.endObject(); return builder; } + private XContentBuilder indicesToXContent(XContentBuilder builder, List indices) throws IOException { + builder.field(DataStream.INDICES_FIELD.getPreferredName()); + builder.startArray(); + for (Index index : indices) { + builder.startObject(); + index.toXContentFragment(builder); + IndexProperties indexProperties = indexSettingsValues.get(index); + if (indexProperties != null) { + builder.field(PREFER_ILM.getPreferredName(), indexProperties.preferIlm()); + if (indexProperties.ilmPolicyName() != null) { + builder.field(ILM_POLICY_FIELD.getPreferredName(), indexProperties.ilmPolicyName()); + } + builder.field(MANAGED_BY.getPreferredName(), indexProperties.managedBy.displayValue); + } + builder.endObject(); + } + builder.endArray(); + return builder; + } + + private void addAutoShardingEvent(XContentBuilder builder, Params params, DataStreamAutoShardingEvent autoShardingEvent) + throws IOException { + if (autoShardingEvent == null) { + return; + } + builder.startObject(AUTO_SHARDING_FIELD.getPreferredName()); + autoShardingEvent.toXContent(builder, params); + builder.humanReadableField( + TIME_SINCE_LAST_AUTO_SHARD_EVENT_MILLIS.getPreferredName(), + TIME_SINCE_LAST_AUTO_SHARD_EVENT.getPreferredName(), + autoShardingEvent.getTimeSinceLastAutoShardingEvent(System::currentTimeMillis) + ); + builder.endObject(); + } + /** * Computes and returns which system will manage the next generation for this data stream. 
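Note: with the restructuring above, the failure-store fields move from top-level keys into one nested `failure_store` object. A sketch of the resulting response fragment, inferred from the builder calls in this hunk; index names and field values are illustrative:

```json
"failure_store": {
  "enabled": true,
  "rollover_on_write": false,
  "indices": [
    {
      "index_name": ".fs-logs-app-2024.05.15-000001",
      "index_uuid": "0aBcDeFgHiJkLmNoPqRsTu",
      "prefer_ilm": false,
      "managed_by": "Data stream lifecycle"
    }
  ]
}
```

The optional `auto_sharding` object is appended inside the same block when an auto-sharding event exists.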
*/ diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java index 3a834273e84cf..226b8d44f636c 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/MigrateToDataStreamAction.java @@ -35,6 +35,7 @@ public static class Request extends AcknowledgedRequest actions) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.actions = Collections.unmodifiableList(actions); } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java index 3b3e644272cbc..0853d30d22de4 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/PromoteDataStreamAction.java @@ -35,6 +35,7 @@ public static class Request extends MasterNodeRequest implements In private boolean includeDefaults = false; public Request(String[] names) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = names; } public Request(String[] names, boolean includeDefaults) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = names; this.includeDefaults = includeDefaults; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java index 8156e03b0cdd1..7bb63ae27b526 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/PutDataStreamLifecycleAction.java @@ -95,6 +95,7 @@ public Request(String[] names, @Nullable TimeValue dataRetention) { } public Request(String[] names, DataStreamLifecycle lifecycle) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.names = names; this.lifecycle = lifecycle; } @@ -104,6 +105,7 @@ public Request(String[] names, @Nullable TimeValue dataRetention, @Nullable Bool } public Request(String[] names, @Nullable TimeValue dataRetention, @Nullable Boolean enabled, @Nullable Downsampling downsampling) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.names = names; this.lifecycle = DataStreamLifecycle.newBuilder() .dataRetention(dataRetention) diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index e8e299c58d2eb..7d2b1be79731e 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -50,13 +50,16 @@ public Request( final TimeValue waitTimeout, final DownsampleConfig downsampleConfig ) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.sourceIndex = sourceIndex; this.targetIndex = targetIndex; this.waitTimeout = waitTimeout == null ? 
DEFAULT_WAIT_TIMEOUT : waitTimeout; this.downsampleConfig = downsampleConfig; } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index afe918c6853e6..794a3f38b56bb 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -862,12 +862,12 @@ public Index getConcreteWriteIndex(IndexAbstraction ia, Metadata metadata) { // Resolve write index and get parent data stream to handle the case of dealing with an alias String defaultWriteIndexName = ia.getWriteIndex().getName(); DataStream dataStream = metadata.getIndicesLookup().get(defaultWriteIndexName).getParentDataStream(); - if (dataStream.getFailureIndices().size() < 1) { + if (dataStream.getFailureIndices().getIndices().size() < 1) { throw new ElasticsearchException( "Attempting to write a document to a failure store but the target data stream does not have one enabled" ); } - return dataStream.getFailureIndices().get(dataStream.getFailureIndices().size() - 1); + return dataStream.getFailureIndices().getIndices().get(dataStream.getFailureIndices().getIndices().size() - 1); } else { // Resolve as normal return ia.getWriteIndex(this, metadata); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java index 3810d95872417..4ac4d63ba5de0 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequest.java @@ -20,6 +20,7 @@ public class DeletePipelineRequest extends AcknowledgedRequest * Create a new pipeline request with the id and source along with the content type of the source */ public PutPipelineRequest(String id, BytesReference source, XContentType xContentType, Integer version) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.id = Objects.requireNonNull(id); this.source = Objects.requireNonNull(source); this.xContentType = Objects.requireNonNull(xContentType); diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 48c2f1890ba08..e8470ba77632f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -101,6 +101,7 @@ private void doRun() { hit.setInnerHits(Maps.newMapWithExpectedSize(innerHitBuilders.size())); } hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); + assert innerHits.isPooled() == false || hit.isPooled() : "pooled inner hits can only be added to a pooled hit"; innerHits.mustIncRef(); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 7e271536be9fe..b6389d0b112b6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -34,17 +34,21 @@ public abstract class AcknowledgedRequest + * For 
requests which originate in the REST layer, use {@link + * org.elasticsearch.rest.RestUtils#getMasterNodeTimeout} to determine the timeout. + *
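Note: the constructor being documented here takes both timeouts that acknowledged requests carry, and they govern different phases of a cluster-state update. A hedged usage sketch; the request class and values are illustrative, and the fluent setters are assumed to be the standard ones on these request types:

```java
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.core.TimeValue;

static CreateIndexRequest exampleTimeouts() {
    CreateIndexRequest request = new CreateIndexRequest("my-index");
    // Phase 1: how long to wait for an elected, responsive master to accept the task.
    request.masterNodeTimeout(TimeValue.timeValueSeconds(30));
    // Phase 2: how long the master waits for the relevant nodes to apply and
    // acknowledge the resulting cluster-state update.
    request.ackTimeout(TimeValue.timeValueSeconds(30));
    return request;
}
```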
+ * For internally-generated requests, choose an appropriate timeout. Often this will be {@link + * TimeValue#MAX_VALUE} (or {@link TimeValue#MINUS_ONE} which means an infinite timeout in 8.15.0 onwards) + * since usually we want internal requests to wait for as long as necessary to complete. + * + * @param ackTimeout specifies how long to wait for all relevant nodes to apply a cluster state update and acknowledge this to + * the elected master. */ - protected AcknowledgedRequest(TimeValue ackTimeout) { + protected AcknowledgedRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout); this.ackTimeout = Objects.requireNonNull(ackTimeout); } @@ -94,6 +98,8 @@ public Plain(StreamInput in) throws IOException { super(in); } - public Plain() {} + public Plain() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java index 7f4100473c42c..92788f53279d5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeReadRequest.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -20,7 +21,20 @@ public abstract class MasterNodeReadRequest + * For requests which originate in the REST layer, use {@link + * org.elasticsearch.rest.RestUtils#getMasterNodeTimeout} to determine the timeout. + *
+ * For internally-generated requests, choose an appropriate timeout. Often this will be {@link + * TimeValue#MAX_VALUE} (or {@link TimeValue#MINUS_ONE} which means an infinite timeout in 8.15.0 onwards) + * since usually we want internal requests to wait for as long as necessary to complete. + */ + protected MasterNodeReadRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); + } protected MasterNodeReadRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 063dbb0397de8..1b3dca31689e2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -21,15 +21,36 @@ */ public abstract class MasterNodeRequest> extends ActionRequest { - public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); - - private TimeValue masterNodeTimeout = DEFAULT_MASTER_NODE_TIMEOUT; + /** + * The default timeout for master-node requests. It's super-trappy to have such a default, because it makes it all too easy to forget + * to add a mechanism by which clients can change it. Without such a mechanism things will work fine until we encounter a large cluster + * that is struggling to process cluster state updates fast enough, and it's a disaster if we cannot extend the master-node timeout in + * those cases. We shouldn't use this any more and should work towards removing it. + *
+ * For requests which originate in the REST layer, use {@link org.elasticsearch.rest.RestUtils#getMasterNodeTimeout} to determine the + * timeout. + *
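Note: a hedged sketch of the REST-layer wiring this paragraph prescribes. The surrounding handler method is hypothetical; RestUtils.getMasterNodeTimeout is the helper named above and is assumed to read the standard `?master_timeout` query parameter:

```java
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestUtils;

// Hypothetical fragment from a RestHandler#prepareRequest implementation:
static GetSettingsRequest fromRest(RestRequest restRequest) {
    GetSettingsRequest request = new GetSettingsRequest();
    // Replace the trappy built-in default with whatever the client asked for.
    request.masterNodeTimeout(RestUtils.getMasterNodeTimeout(restRequest));
    return request;
}
```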
+ * For internally-generated requests, choose an appropriate timeout. Often this will be {@link TimeValue#MAX_VALUE} (or {@link + * TimeValue#MINUS_ONE} which means an infinite timeout in 8.15.0 onwards) since usually we want internal requests to wait for as long + * as necessary to complete. + * + * @deprecated all requests should specify a timeout, see #107984. + */ + @Deprecated(forRemoval = true) + public static final TimeValue TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); - protected MasterNodeRequest() {} + private TimeValue masterNodeTimeout; /** * @param masterNodeTimeout Specifies how long to wait when the master has not been discovered yet, or is disconnected, or is busy - * processing other tasks. The value {@link TimeValue#MINUS_ONE} means to wait forever. + * processing other tasks. The value {@link TimeValue#MINUS_ONE} means to wait forever in 8.15.0 onwards. + *
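Note: the internally-generated counterpart described above, again as a hedged sketch; the request type and call site are illustrative:

```java
import org.elasticsearch.action.datastreams.CreateDataStreamAction;
import org.elasticsearch.core.TimeValue;

// An internal caller that should not give up just because the master is busy:
static CreateDataStreamAction.Request internalCreate(String name) {
    CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(name);
    // MAX_VALUE (or MINUS_ONE, meaning "forever" from 8.15.0 onwards) suits
    // internal work that should wait as long as necessary to complete.
    request.masterNodeTimeout(TimeValue.MAX_VALUE);
    return request;
}
```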
+ * For requests which originate in the REST layer, use {@link + * org.elasticsearch.rest.RestUtils#getMasterNodeTimeout} to determine the timeout. + *
+ * For internally-generated requests, choose an appropriate timeout. Often this will be {@link + * TimeValue#MAX_VALUE} (or {@link TimeValue#MINUS_ONE} which means an infinite timeout in 8.15.0 onwards) + * since usually we want internal requests to wait for as long as necessary to complete. */ protected MasterNodeRequest(TimeValue masterNodeTimeout) { this.masterNodeTimeout = Objects.requireNonNull(masterNodeTimeout); @@ -49,7 +70,14 @@ public void writeTo(StreamOutput out) throws IOException { /** * Specifies how long to wait when the master has not been discovered yet, or is disconnected, or is busy processing other tasks. The - * value {@link TimeValue#MINUS_ONE} means to wait forever. + * value {@link TimeValue#MINUS_ONE} means to wait forever in 8.15.0 onwards. + *
+ * For requests which originate in the REST layer, use {@link org.elasticsearch.rest.RestUtils#getMasterNodeTimeout} to determine the + * timeout. + *
+ * For internally-generated requests, choose an appropriate timeout. Often this will be {@link TimeValue#MAX_VALUE} (or {@link + * TimeValue#MINUS_ONE} which means an infinite timeout in 8.15.0 onwards) since usually we want internal requests to wait for as long + * as necessary to complete. */ @SuppressWarnings("unchecked") public final Request masterNodeTimeout(TimeValue timeout) { diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java index 00384852d1472..94ba504c8b175 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java @@ -26,10 +26,13 @@ public abstract class ClusterInfoRequest indices; private final long generation; @Nullable private final Map metadata; @@ -114,12 +111,10 @@ public static boolean isFailureStoreFeatureFlagEnabled() { private final IndexMode indexMode; @Nullable private final DataStreamLifecycle lifecycle; - private final boolean rolloverOnWrite; private final boolean failureStoreEnabled; - private final List failureIndices; - private volatile Set failureStoreLookup; - @Nullable - private final DataStreamAutoShardingEvent autoShardingEvent; + + private final DataStreamIndices backingIndices; + private final DataStreamIndices failureIndices; public DataStream( String name, @@ -139,7 +134,6 @@ public DataStream( ) { this( name, - indices, generation, metadata, hidden, @@ -150,16 +144,14 @@ public DataStream( indexMode, lifecycle, failureStoreEnabled, - failureIndices, - rolloverOnWrite, - autoShardingEvent + new DataStreamIndices(BACKING_INDEX_PREFIX, List.copyOf(indices), rolloverOnWrite, autoShardingEvent), + new DataStreamIndices(FAILURE_STORE_PREFIX, List.copyOf(failureIndices), false, null) ); } // visible for testing DataStream( String name, - List indices, long generation, Map metadata, boolean hidden, @@ -170,13 +162,10 @@ public DataStream( IndexMode indexMode, DataStreamLifecycle lifecycle, boolean failureStoreEnabled, - List failureIndices, - boolean rolloverOnWrite, - @Nullable DataStreamAutoShardingEvent autoShardingEvent + DataStreamIndices backingIndices, + DataStreamIndices failureIndices ) { this.name = name; - this.indices = List.copyOf(indices); - assert indices.isEmpty() == false; this.generation = generation; this.metadata = metadata; assert system == false || hidden; // system indices must be hidden @@ -188,21 +177,11 @@ public DataStream( this.indexMode = indexMode; this.lifecycle = lifecycle; this.failureStoreEnabled = failureStoreEnabled; + assert backingIndices.indices.isEmpty() == false; + assert replicated == false || (backingIndices.rolloverOnWrite == false && failureIndices.rolloverOnWrite == false) + : "replicated data streams cannot be marked for lazy rollover"; + this.backingIndices = backingIndices; this.failureIndices = failureIndices; - assert assertConsistent(this.indices); - assert replicated == false || rolloverOnWrite == false : "replicated data streams cannot be marked for lazy rollover"; - this.rolloverOnWrite = rolloverOnWrite; - this.autoShardingEvent = autoShardingEvent; - } - - private static boolean assertConsistent(List indices) { - assert indices.size() > 0; - final Set indexNames = new HashSet<>(); - for (Index index : indices) { - final boolean added = indexNames.add(index.getName()); - assert added : "found duplicate index entries in " + 
indices; - } - return true; } @Override @@ -222,20 +201,16 @@ public boolean isDataStreamRelated() { @Override public List getIndices() { - return indices; + return backingIndices.indices; } public long getGeneration() { return generation; } - public List getFailureIndices() { - return failureIndices; - } - @Override public Index getWriteIndex() { - return indices.get(indices.size() - 1); + return backingIndices.getWriteIndex(); } /** @@ -243,29 +218,18 @@ public Index getWriteIndex() { */ @Nullable public Index getFailureStoreWriteIndex() { - return isFailureStoreEnabled() == false || failureIndices.isEmpty() ? null : failureIndices.get(failureIndices.size() - 1); + return isFailureStoreEnabled() == false || failureIndices.indices.isEmpty() ? null : failureIndices.getWriteIndex(); } /** * Returns true if the index name provided belongs to a failure store index. - * This method builds a local Set with all the failure store index names and then checks if it contains the name. - * This will perform better if there are multiple indices of this data stream checked. */ public boolean isFailureStoreIndex(String indexName) { - if (failureStoreLookup == null) { - // There is a chance this will be calculated twice, but it's a relatively cheap action, - // so it's not worth synchronising - if (failureIndices == null || failureIndices.isEmpty()) { - failureStoreLookup = Set.of(); - } else { - failureStoreLookup = failureIndices.stream().map(Index::getName).collect(Collectors.toSet()); - } - } - return failureStoreLookup.contains(indexName); + return failureIndices.containsIndex(indexName); } public boolean rolloverOnWrite() { - return rolloverOnWrite; + return backingIndices.rolloverOnWrite; } /** @@ -275,8 +239,8 @@ public boolean rolloverOnWrite() { * an end time that is less than the provided timestamp. Otherwise null is returned. */ public Index selectTimeSeriesWriteIndex(Instant timestamp, Metadata metadata) { - for (int i = indices.size() - 1; i >= 0; i--) { - Index index = indices.get(i); + for (int i = backingIndices.indices.size() - 1; i >= 0; i--) { + Index index = backingIndices.indices.get(i); IndexMetadata im = metadata.index(index); // TODO: make index_mode, start and end time fields in IndexMetadata class. @@ -306,7 +270,7 @@ public Index selectTimeSeriesWriteIndex(Instant timestamp, Metadata metadata) { public void validate(Function imSupplier) { if (indexMode == IndexMode.TIME_SERIES) { // Get a sorted overview of each backing index with there start and end time range: - var startAndEndTimes = indices.stream().map(index -> { + var startAndEndTimes = backingIndices.indices.stream().map(index -> { IndexMetadata im = imSupplier.apply(index.getName()); if (im == null) { throw new IllegalStateException("index [" + index.getName() + "] is not found in the index metadata supplier"); @@ -407,7 +371,19 @@ public DataStreamLifecycle getLifecycle() { * Returns the latest auto sharding event that happened for this data stream */ public DataStreamAutoShardingEvent getAutoShardingEvent() { - return autoShardingEvent; + return backingIndices.autoShardingEvent; + } + + public DataStreamIndices getBackingIndices() { + return backingIndices; + } + + public DataStreamIndices getFailureIndices() { + return failureIndices; + } + + public DataStreamIndices getDataStreamIndices(boolean failureStore) { + return failureStore ? 
this.failureIndices : backingIndices; } /** @@ -446,15 +422,11 @@ public DataStream unsafeRollover(Index writeIndex, long generation, boolean time indexMode = null; } - List backingIndices = new ArrayList<>(indices); + List backingIndices = new ArrayList<>(this.backingIndices.indices); backingIndices.add(writeIndex); - return copy().setIndices(backingIndices) - .setGeneration(generation) - .setReplicated(false) - .setIndexMode(indexMode) - .setAutoShardingEvent(autoShardingEvent) - .setRolloverOnWrite(false) - .build(); + return copy().setBackingIndices( + this.backingIndices.copy().setIndices(backingIndices).setAutoShardingEvent(autoShardingEvent).setRolloverOnWrite(false).build() + ).setGeneration(generation).setIndexMode(indexMode).build(); } /** @@ -475,56 +447,32 @@ public DataStream rolloverFailureStore(Index writeIndex, long generation) { * Like {@link #rolloverFailureStore(Index, long)}, but does no validation, use with care only. */ public DataStream unsafeRolloverFailureStore(Index writeIndex, long generation) { - List failureIndices = new ArrayList<>(this.failureIndices); + List failureIndices = new ArrayList<>(this.failureIndices.indices); failureIndices.add(writeIndex); - return copy().setGeneration(generation).setReplicated(false).setFailureIndices(failureIndices).build(); + return copy().setGeneration(generation).setFailureIndices(this.failureIndices.copy().setIndices(failureIndices).build()).build(); } /** * Generates the next write index name and generation to be used for rolling over this data stream. * * @param clusterMetadata Cluster metadata + * @param dataStreamIndices The data stream indices that we're generating the next write index name and generation for * @return tuple of the next write index name and next generation. */ - public Tuple nextWriteIndexAndGeneration(Metadata clusterMetadata) { - ensureNotReplicated(); - return unsafeNextWriteIndexAndGeneration(clusterMetadata); - } - - /** - * Like {@link #nextWriteIndexAndGeneration(Metadata)}, but does no validation, use with care only. - */ - public Tuple unsafeNextWriteIndexAndGeneration(Metadata clusterMetadata) { - return generateNextWriteIndexAndGeneration(clusterMetadata, DataStream::getDefaultBackingIndexName); - } - - /** - * Generates the next write index name and generation to be used for rolling over the failure store of this data stream. - * - * @param clusterMetadata Cluster metadata - * @return tuple of the next failure store write index name and next generation. - */ - public Tuple nextFailureStoreWriteIndexAndGeneration(Metadata clusterMetadata) { + public Tuple nextWriteIndexAndGeneration(Metadata clusterMetadata, DataStreamIndices dataStreamIndices) { ensureNotReplicated(); - return unsafeNextFailureStoreWriteIndexAndGeneration(clusterMetadata); + return unsafeNextWriteIndexAndGeneration(clusterMetadata, dataStreamIndices); } /** - * Like {@link #nextFailureStoreWriteIndexAndGeneration(Metadata)}, but does no validation, use with care only. + * Like {@link #nextWriteIndexAndGeneration(Metadata, DataStreamIndices)}, but does no validation, use with care only. 
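Note: the name generation that this code delegates to DataStreamIndices#generateName keeps the established `<prefix><data-stream>-<yyyy.MM.dd>-<6-digit generation>` convention, with the prefix now owned by the index family. A sketch using the public helpers from this file; the data-stream name and timestamp are illustrative:

```java
import org.elasticsearch.cluster.metadata.DataStream;

static void nameExamples() {
    long epochMillis = 1715731200000L; // 2024-05-15T00:00:00Z, illustrative
    // Backing indices use the ".ds-" prefix, failure-store indices ".fs-":
    String backing = DataStream.getDefaultBackingIndexName("logs-app", 42, epochMillis);
    // => ".ds-logs-app-2024.05.15-000042"
    String failure = DataStream.getDefaultFailureStoreName("logs-app", 42, epochMillis);
    // => ".fs-logs-app-2024.05.15-000042"
}
```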
*/ - public Tuple unsafeNextFailureStoreWriteIndexAndGeneration(Metadata clusterMetadata) { - return generateNextWriteIndexAndGeneration(clusterMetadata, DataStream::getDefaultFailureStoreName); - } - - private Tuple generateNextWriteIndexAndGeneration( - Metadata clusterMetadata, - TriFunction nameGenerator - ) { + public Tuple unsafeNextWriteIndexAndGeneration(Metadata clusterMetadata, DataStreamIndices dataStreamIndices) { String newWriteIndexName; long generation = this.generation; long currentTimeMillis = timeProvider.getAsLong(); do { - newWriteIndexName = nameGenerator.apply(getName(), ++generation, currentTimeMillis); + newWriteIndexName = dataStreamIndices.generateName(name, ++generation, currentTimeMillis); } while (clusterMetadata.hasIndexAbstraction(newWriteIndexName)); return Tuple.tuple(newWriteIndexName, generation); } @@ -544,14 +492,14 @@ private void ensureNotReplicated() { * @throws IllegalArgumentException if {@code index} is not a backing index or is the current write index of the data stream */ public DataStream removeBackingIndex(Index index) { - int backingIndexPosition = indices.indexOf(index); + int backingIndexPosition = backingIndices.indices.indexOf(index); if (backingIndexPosition == -1) { throw new IllegalArgumentException( String.format(Locale.ROOT, "index [%s] is not part of data stream [%s]", index.getName(), name) ); } - if (indices.size() == (backingIndexPosition + 1)) { + if (backingIndices.indices.size() == (backingIndexPosition + 1)) { throw new IllegalArgumentException( String.format( Locale.ROOT, @@ -562,10 +510,12 @@ public DataStream removeBackingIndex(Index index) { ); } - List backingIndices = new ArrayList<>(indices); + List backingIndices = new ArrayList<>(this.backingIndices.indices); backingIndices.remove(index); - assert backingIndices.size() == indices.size() - 1; - return copy().setIndices(backingIndices).setGeneration(generation + 1).build(); + assert backingIndices.size() == this.backingIndices.indices.size() - 1; + return copy().setBackingIndices(this.backingIndices.copy().setIndices(backingIndices).build()) + .setGeneration(generation + 1) + .build(); } /** @@ -578,7 +528,7 @@ public DataStream removeBackingIndex(Index index) { * data stream */ public DataStream removeFailureStoreIndex(Index index) { - int failureIndexPosition = failureIndices.indexOf(index); + int failureIndexPosition = failureIndices.indices.indexOf(index); if (failureIndexPosition == -1) { throw new IllegalArgumentException( @@ -588,7 +538,7 @@ public DataStream removeFailureStoreIndex(Index index) { // TODO: When failure stores are lazily created, this wont necessarily be required anymore. 
We can remove the failure store write // index as long as we mark the data stream to lazily rollover the failure store with no conditions on its next write - if (failureIndices.size() == (failureIndexPosition + 1)) { + if (failureIndices.indices.size() == (failureIndexPosition + 1)) { throw new IllegalArgumentException( String.format( Locale.ROOT, @@ -599,10 +549,12 @@ public DataStream removeFailureStoreIndex(Index index) { ); } - List updatedFailureIndices = new ArrayList<>(failureIndices); + List updatedFailureIndices = new ArrayList<>(failureIndices.indices); updatedFailureIndices.remove(index); - assert updatedFailureIndices.size() == failureIndices.size() - 1; - return copy().setGeneration(generation + 1).setFailureIndices(updatedFailureIndices).build(); + assert updatedFailureIndices.size() == failureIndices.indices.size() - 1; + return copy().setFailureIndices(failureIndices.copy().setIndices(updatedFailureIndices).build()) + .setGeneration(generation + 1) + .build(); } /** @@ -616,14 +568,14 @@ public DataStream removeFailureStoreIndex(Index index) { * existing index. */ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBackingIndex) { - List backingIndices = new ArrayList<>(indices); + List backingIndices = new ArrayList<>(this.backingIndices.indices); int backingIndexPosition = backingIndices.indexOf(existingBackingIndex); if (backingIndexPosition == -1) { throw new IllegalArgumentException( String.format(Locale.ROOT, "index [%s] is not part of data stream [%s]", existingBackingIndex.getName(), name) ); } - if (indices.size() == (backingIndexPosition + 1)) { + if (this.backingIndices.indices.size() == (backingIndexPosition + 1)) { throw new IllegalArgumentException( String.format( Locale.ROOT, @@ -634,7 +586,9 @@ public DataStream replaceBackingIndex(Index existingBackingIndex, Index newBacki ); } backingIndices.set(backingIndexPosition, newBackingIndex); - return copy().setIndices(backingIndices).setGeneration(generation + 1).build(); + return copy().setBackingIndices(this.backingIndices.copy().setIndices(backingIndices).build()) + .setGeneration(generation + 1) + .build(); } /** @@ -656,10 +610,12 @@ public DataStream addBackingIndex(Metadata clusterMetadata, Index index) { // ensure that no aliases reference index ensureNoAliasesOnIndex(clusterMetadata, index); - List backingIndices = new ArrayList<>(indices); + List backingIndices = new ArrayList<>(this.backingIndices.indices); backingIndices.add(0, index); - assert backingIndices.size() == indices.size() + 1; - return copy().setIndices(backingIndices).setGeneration(generation + 1).build(); + assert backingIndices.size() == this.backingIndices.indices.size() + 1; + return copy().setBackingIndices(this.backingIndices.copy().setIndices(backingIndices).build()) + .setGeneration(generation + 1) + .build(); } /** @@ -680,10 +636,12 @@ public DataStream addFailureStoreIndex(Metadata clusterMetadata, Index index) { ensureNoAliasesOnIndex(clusterMetadata, index); - List updatedFailureIndices = new ArrayList<>(failureIndices); + List updatedFailureIndices = new ArrayList<>(failureIndices.indices); updatedFailureIndices.add(0, index); - assert updatedFailureIndices.size() == failureIndices.size() + 1; - return copy().setGeneration(generation + 1).setFailureIndices(updatedFailureIndices).build(); + assert updatedFailureIndices.size() == failureIndices.indices.size() + 1; + return copy().setFailureIndices(failureIndices.copy().setIndices(updatedFailureIndices).build()) + .setGeneration(generation + 1) + 
.build(); } /** @@ -742,7 +700,7 @@ public DataStream promoteDataStream() { @Nullable public DataStream snapshot(Collection indicesInSnapshot) { // do not include indices not available in the snapshot - List reconciledIndices = new ArrayList<>(this.indices); + List reconciledIndices = new ArrayList<>(this.backingIndices.indices); if (reconciledIndices.removeIf(x -> indicesInSnapshot.contains(x.getName()) == false) == false) { return this; } @@ -751,7 +709,9 @@ public DataStream snapshot(Collection indicesInSnapshot) { return null; } - return copy().setIndices(reconciledIndices).setMetadata(metadata == null ? null : new HashMap<>(metadata)).build(); + return copy().setBackingIndices(backingIndices.copy().setIndices(reconciledIndices).build()) + .setMetadata(metadata == null ? null : new HashMap<>(metadata)) + .build(); } /** @@ -792,7 +752,7 @@ public List getDownsamplingRoundsFor( Function indexMetadataSupplier, LongSupplier nowSupplier ) { - assert indices.contains(index) : "the provided index must be a backing index for this datastream"; + assert backingIndices.indices.contains(index) : "the provided index must be a backing index for this datastream"; if (lifecycle == null || lifecycle.getDownsamplingRounds() == null) { return List.of(); } @@ -831,7 +791,7 @@ public List getNonWriteIndicesOlderThan( LongSupplier nowSupplier ) { List olderIndices = new ArrayList<>(); - for (Index index : indices) { + for (Index index : backingIndices.indices) { if (isIndexOderThan(index, retentionPeriod.getMillis(), nowSupplier.getAsLong(), indicesPredicate, indexMetadataSupplier)) { olderIndices.add(index); } @@ -864,7 +824,7 @@ private boolean isIndexOderThan( * we return false. */ public boolean isIndexManagedByDataStreamLifecycle(Index index, Function indexMetadataSupplier) { - if (indices.contains(index) == false) { + if (backingIndices.indices.contains(index) == false) { return false; } IndexMetadata indexMetadata = indexMetadataSupplier.apply(index.getName()); @@ -936,13 +896,7 @@ public static String getDefaultBackingIndexName(String dataStreamName, long gene * @return backing index name */ public static String getDefaultBackingIndexName(String dataStreamName, long generation, long epochMillis) { - return String.format( - Locale.ROOT, - BACKING_INDEX_PREFIX + "%s-%s-%06d", - dataStreamName, - DATE_FORMATTER.formatMillis(epochMillis), - generation - ); + return getDefaultIndexName(BACKING_INDEX_PREFIX, dataStreamName, generation, epochMillis); } /** @@ -955,33 +909,65 @@ public static String getDefaultBackingIndexName(String dataStreamName, long gene * @return backing index name */ public static String getDefaultFailureStoreName(String dataStreamName, long generation, long epochMillis) { - return String.format( - Locale.ROOT, - FAILURE_STORE_PREFIX + "%s-%s-%06d", - dataStreamName, - DATE_FORMATTER.formatMillis(epochMillis), - generation - ); + return getDefaultIndexName(FAILURE_STORE_PREFIX, dataStreamName, generation, epochMillis); } - public DataStream(StreamInput in) throws IOException { - this( - readName(in), - readIndices(in), - in.readVLong(), - in.readGenericMap(), - in.readBoolean(), - in.readBoolean(), - in.readBoolean(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false, - in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null, - in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? 
in.readOptionalWriteable(DataStreamLifecycle::new) : null, - in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false, - in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0) ? in.readBoolean() : false, - in.getTransportVersion().onOrAfter(DataStream.ADDED_AUTO_SHARDING_EVENT_VERSION) - ? in.readOptionalWriteable(DataStreamAutoShardingEvent::new) - : null + /** + * Generates the name of the index that conforms to the default naming convention for indices + * on data streams given the specified prefix, data stream name, generation, and time. + * + * @param prefix the prefix that the index name should have + * @param dataStreamName name of the data stream + * @param generation generation of the data stream + * @param epochMillis creation time for the backing index + * @return backing index name + */ + private static String getDefaultIndexName(String prefix, String dataStreamName, long generation, long epochMillis) { + return String.format(Locale.ROOT, prefix + "%s-%s-%06d", dataStreamName, DATE_FORMATTER.formatMillis(epochMillis), generation); + } + + public static DataStream read(StreamInput in) throws IOException { + var name = readName(in); + var backingIndicesBuilder = DataStreamIndices.backingIndicesBuilder(readIndices(in)); + var generation = in.readVLong(); + var metadata = in.readGenericMap(); + var hidden = in.readBoolean(); + var replicated = in.readBoolean(); + var system = in.readBoolean(); + var allowCustomRouting = in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false; + var indexMode = in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null; + var lifecycle = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) + ? in.readOptionalWriteable(DataStreamLifecycle::new) + : null; + var failureStoreEnabled = in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) + ? in.readBoolean() + : false; + var failureIndices = in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) + ? readIndices(in) + : List.of(); + var failureIndicesBuilder = DataStreamIndices.failureIndicesBuilder(failureIndices); + backingIndicesBuilder.setRolloverOnWrite(in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0) ? 
in.readBoolean() : false); + if (in.getTransportVersion().onOrAfter(DataStream.ADDED_AUTO_SHARDING_EVENT_VERSION)) { + backingIndicesBuilder.setAutoShardingEvent(in.readOptionalWriteable(DataStreamAutoShardingEvent::new)); + } + if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_FIELD_PARITY)) { + failureIndicesBuilder.setRolloverOnWrite(in.readBoolean()) + .setAutoShardingEvent(in.readOptionalWriteable(DataStreamAutoShardingEvent::new)); + } + return new DataStream( + name, + generation, + metadata, + hidden, + replicated, + system, + System::currentTimeMillis, + allowCustomRouting, + indexMode, + lifecycle, + failureStoreEnabled, + backingIndicesBuilder.build(), + failureIndicesBuilder.build() ); } @@ -996,14 +982,14 @@ static List readIndices(StreamInput in) throws IOException { } public static Diff readDiffFrom(StreamInput in) throws IOException { - return SimpleDiffable.readDiffFrom(DataStream::new, in); + return SimpleDiffable.readDiffFrom(DataStream::read, in); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(TIMESTAMP_FIELD_NAME); // TODO: clear this out in the future https://github.com/elastic/elasticsearch/issues/101991 - out.writeCollection(indices); + out.writeCollection(backingIndices.indices); out.writeVLong(generation); out.writeGenericMap(metadata); out.writeBoolean(hidden); @@ -1020,13 +1006,17 @@ public void writeTo(StreamOutput out) throws IOException { } if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { out.writeBoolean(failureStoreEnabled); - out.writeCollection(failureIndices); + out.writeCollection(failureIndices.indices); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { - out.writeBoolean(rolloverOnWrite); + out.writeBoolean(backingIndices.rolloverOnWrite); } if (out.getTransportVersion().onOrAfter(DataStream.ADDED_AUTO_SHARDING_EVENT_VERSION)) { - out.writeOptionalWriteable(autoShardingEvent); + out.writeOptionalWriteable(backingIndices.autoShardingEvent); + } + if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_FIELD_PARITY)) { + out.writeBoolean(failureIndices.rolloverOnWrite); + out.writeOptionalWriteable(failureIndices.autoShardingEvent); } } @@ -1045,30 +1035,41 @@ public void writeTo(StreamOutput out) throws IOException { public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices"); public static final ParseField ROLLOVER_ON_WRITE_FIELD = new ParseField("rollover_on_write"); public static final ParseField AUTO_SHARDING_FIELD = new ParseField("auto_sharding"); + public static final ParseField FAILURE_ROLLOVER_ON_WRITE_FIELD = new ParseField("failure_rollover_on_write"); + public static final ParseField FAILURE_AUTO_SHARDING_FIELD = new ParseField("failure_auto_sharding"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_stream", args -> { // Fields behind a feature flag need to be parsed last otherwise the parser will fail when the feature flag is disabled. // Until the feature flag is removed we keep them separately to be mindful of this. boolean failureStoreEnabled = DataStream.isFailureStoreFeatureFlagEnabled() && args[12] != null && (boolean) args[12]; - List failureStoreIndices = DataStream.isFailureStoreFeatureFlagEnabled() && args[13] != null - ? (List) args[13] - : List.of(); + DataStreamIndices failureIndices = DataStream.isFailureStoreFeatureFlagEnabled() + ? 
new DataStreamIndices( + FAILURE_STORE_PREFIX, + args[13] != null ? (List) args[13] : List.of(), + args[14] != null && (boolean) args[14], + (DataStreamAutoShardingEvent) args[15] + ) + : new DataStreamIndices(FAILURE_STORE_PREFIX, List.of(), false, null); return new DataStream( (String) args[0], - (List) args[1], (Long) args[2], (Map) args[3], args[4] != null && (boolean) args[4], args[5] != null && (boolean) args[5], args[6] != null && (boolean) args[6], + System::currentTimeMillis, args[7] != null && (boolean) args[7], args[8] != null ? IndexMode.fromString((String) args[8]) : null, (DataStreamLifecycle) args[9], failureStoreEnabled, - failureStoreIndices, - args[10] != null && (boolean) args[10], - (DataStreamAutoShardingEvent) args[11] + new DataStreamIndices( + BACKING_INDEX_PREFIX, + (List) args[1], + args[10] != null && (boolean) args[10], + (DataStreamAutoShardingEvent) args[11] + ), + failureIndices ); }); @@ -1105,6 +1106,12 @@ public void writeTo(StreamOutput out) throws IOException { (p, c) -> Index.fromXContent(p), FAILURE_INDICES_FIELD ); + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_ROLLOVER_ON_WRITE_FIELD); + PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataStreamAutoShardingEvent.fromXContent(p), + FAILURE_AUTO_SHARDING_FIELD + ); } } @@ -1132,11 +1139,8 @@ public XContentBuilder toXContent( .startObject() .field(NAME_FIELD.getPreferredName(), TIMESTAMP_FIELD_NAME) .endObject(); - builder.xContentList(INDICES_FIELD.getPreferredName(), indices); + builder.xContentList(INDICES_FIELD.getPreferredName(), backingIndices.indices); builder.field(GENERATION_FIELD.getPreferredName(), generation); - if (DataStream.isFailureStoreFeatureFlagEnabled() && failureIndices.isEmpty() == false) { - builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices); - } if (metadata != null) { builder.field(METADATA_FIELD.getPreferredName(), metadata); } @@ -1146,6 +1150,15 @@ public XContentBuilder toXContent( builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting); if (DataStream.isFailureStoreFeatureFlagEnabled()) { builder.field(FAILURE_STORE_FIELD.getPreferredName(), failureStoreEnabled); + if (failureIndices.indices.isEmpty() == false) { + builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices.indices); + } + builder.field(FAILURE_ROLLOVER_ON_WRITE_FIELD.getPreferredName(), failureIndices.rolloverOnWrite); + if (failureIndices.autoShardingEvent != null) { + builder.startObject(FAILURE_AUTO_SHARDING_FIELD.getPreferredName()); + failureIndices.autoShardingEvent.toXContent(builder, params); + builder.endObject(); + } } if (indexMode != null) { builder.field(INDEX_MODE.getPreferredName(), indexMode); @@ -1154,10 +1167,10 @@ public XContentBuilder toXContent( builder.field(LIFECYCLE.getPreferredName()); lifecycle.toXContent(builder, params, rolloverConfiguration, isSystem() ? 
null : globalRetention); } - builder.field(ROLLOVER_ON_WRITE_FIELD.getPreferredName(), rolloverOnWrite); - if (autoShardingEvent != null) { + builder.field(ROLLOVER_ON_WRITE_FIELD.getPreferredName(), backingIndices.rolloverOnWrite); + if (backingIndices.autoShardingEvent != null) { builder.startObject(AUTO_SHARDING_FIELD.getPreferredName()); - autoShardingEvent.toXContent(builder, params); + backingIndices.autoShardingEvent.toXContent(builder, params); builder.endObject(); } builder.endObject(); @@ -1170,7 +1183,6 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; DataStream that = (DataStream) o; return name.equals(that.name) - && indices.equals(that.indices) && generation == that.generation && Objects.equals(metadata, that.metadata) && hidden == that.hidden @@ -1180,16 +1192,14 @@ public boolean equals(Object o) { && indexMode == that.indexMode && Objects.equals(lifecycle, that.lifecycle) && failureStoreEnabled == that.failureStoreEnabled - && failureIndices.equals(that.failureIndices) - && rolloverOnWrite == that.rolloverOnWrite - && Objects.equals(autoShardingEvent, that.autoShardingEvent); + && Objects.equals(backingIndices, that.backingIndices) + && Objects.equals(failureIndices, that.failureIndices); } @Override public int hashCode() { return Objects.hash( name, - indices, generation, metadata, hidden, @@ -1199,9 +1209,8 @@ public int hashCode() { indexMode, lifecycle, failureStoreEnabled, - failureIndices, - rolloverOnWrite, - autoShardingEvent + backingIndices, + failureIndices ); } @@ -1345,14 +1354,143 @@ public static Builder builder(String name, List indices) { return new Builder(name, indices); } + public static Builder builder(String name, DataStreamIndices backingIndices) { + return new Builder(name, backingIndices); + } + public Builder copy() { return new Builder(this); } + public static class DataStreamIndices { + private final String namePrefix; + private final List indices; + private final boolean rolloverOnWrite; + @Nullable + private final DataStreamAutoShardingEvent autoShardingEvent; + private Set lookup; + + protected DataStreamIndices( + String namePrefix, + List indices, + boolean rolloverOnWrite, + DataStreamAutoShardingEvent autoShardingEvent + ) { + this.namePrefix = namePrefix; + // The list of indices is expected to be an immutable list. We don't create an immutable copy here, as it might have + // impact on the performance on some usages. 
+ this.indices = indices; + this.rolloverOnWrite = rolloverOnWrite; + this.autoShardingEvent = autoShardingEvent; + + assert getLookup().size() == indices.size() : "found duplicate index entries in " + indices; + } + + private Set getLookup() { + if (lookup == null) { + lookup = indices.stream().map(Index::getName).collect(Collectors.toSet()); + } + return lookup; + } + + public Index getWriteIndex() { + return indices.get(indices.size() - 1); + } + + public boolean containsIndex(String index) { + return getLookup().contains(index); + } + + private String generateName(String dataStreamName, long generation, long epochMillis) { + return getDefaultIndexName(namePrefix, dataStreamName, generation, epochMillis); + } + + public static Builder backingIndicesBuilder(List indices) { + return new Builder(BACKING_INDEX_PREFIX, indices); + } + + public static Builder failureIndicesBuilder(List indices) { + return new Builder(FAILURE_STORE_PREFIX, indices); + } + + public Builder copy() { + return new Builder(this); + } + + public List getIndices() { + return indices; + } + + public boolean isRolloverOnWrite() { + return rolloverOnWrite; + } + + public DataStreamAutoShardingEvent getAutoShardingEvent() { + return autoShardingEvent; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DataStreamIndices that = (DataStreamIndices) o; + return rolloverOnWrite == that.rolloverOnWrite + && Objects.equals(namePrefix, that.namePrefix) + && Objects.equals(indices, that.indices) + && Objects.equals(autoShardingEvent, that.autoShardingEvent); + } + + @Override + public int hashCode() { + return Objects.hash(namePrefix, indices, rolloverOnWrite, autoShardingEvent); + } + + public static class Builder { + private final String namePrefix; + private List indices; + private boolean rolloverOnWrite = false; + @Nullable + private DataStreamAutoShardingEvent autoShardingEvent = null; + + private Builder(String namePrefix, List indices) { + this.namePrefix = namePrefix; + this.indices = indices; + } + + private Builder(DataStreamIndices dataStreamIndices) { + this.namePrefix = dataStreamIndices.namePrefix; + this.indices = dataStreamIndices.indices; + this.rolloverOnWrite = dataStreamIndices.rolloverOnWrite; + this.autoShardingEvent = dataStreamIndices.autoShardingEvent; + } + + /** + * Set the list of indices. We always create an immutable copy as that's what the constructor expects. 
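
As an aside on the copy discipline here: the builder's `setIndices` below is the one place a defensive copy is made, and because `List.copyOf` generally returns its argument unchanged when handed an already-unmodifiable list, round-tripping a `DataStream` through `copy()...build()` should not re-copy the index list each time. A minimal sketch of that JDK behaviour (class and variable names are illustrative, not part of this PR):

```java
import java.util.List;

class CopyOfDemo {
    public static void main(String[] args) {
        List<String> indices = List.of(".ds-logs-2024.05.12-000001", ".ds-logs-2024.05.12-000002");
        // List.copyOf only allocates when its input is not already an
        // unmodifiable list, so repeated builder round-trips stay cheap.
        List<String> copied = List.copyOf(indices);
        System.out.println(copied == indices); // true on current JDKs
    }
}
```
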
+ */ + public Builder setIndices(List indices) { + this.indices = List.copyOf(indices); + return this; + } + + public Builder setRolloverOnWrite(boolean rolloverOnWrite) { + this.rolloverOnWrite = rolloverOnWrite; + return this; + } + + public Builder setAutoShardingEvent(DataStreamAutoShardingEvent autoShardingEvent) { + this.autoShardingEvent = autoShardingEvent; + return this; + } + + public DataStreamIndices build() { + return new DataStreamIndices(namePrefix, indices, rolloverOnWrite, autoShardingEvent); + } + } + } + public static class Builder { private LongSupplier timeProvider = System::currentTimeMillis; private String name; - private List indices; private long generation = 1; @Nullable private Map metadata = null; @@ -1364,22 +1502,23 @@ public static class Builder { private IndexMode indexMode = null; @Nullable private DataStreamLifecycle lifecycle = null; - private boolean rolloverOnWrite = false; private boolean failureStoreEnabled = false; - private List failureIndices = List.of(); - @Nullable - private DataStreamAutoShardingEvent autoShardingEvent = null; + private DataStreamIndices backingIndices; + private DataStreamIndices failureIndices = DataStreamIndices.failureIndicesBuilder(List.of()).build(); - public Builder(String name, List indices) { + private Builder(String name, List indices) { + this(name, DataStreamIndices.backingIndicesBuilder(indices).build()); + } + + private Builder(String name, DataStreamIndices backingIndices) { this.name = name; - assert indices.isEmpty() == false : "Cannot create data stream with empty backing indices"; - this.indices = indices; + assert backingIndices.indices.isEmpty() == false : "Cannot create data stream with empty backing indices"; + this.backingIndices = backingIndices; } - public Builder(DataStream dataStream) { + private Builder(DataStream dataStream) { timeProvider = dataStream.timeProvider; name = dataStream.name; - indices = dataStream.indices; generation = dataStream.generation; metadata = dataStream.metadata; hidden = dataStream.hidden; @@ -1388,10 +1527,9 @@ public Builder(DataStream dataStream) { allowCustomRouting = dataStream.allowCustomRouting; indexMode = dataStream.indexMode; lifecycle = dataStream.lifecycle; - rolloverOnWrite = dataStream.rolloverOnWrite; failureStoreEnabled = dataStream.failureStoreEnabled; + backingIndices = dataStream.backingIndices; failureIndices = dataStream.failureIndices; - autoShardingEvent = dataStream.autoShardingEvent; } public Builder setTimeProvider(LongSupplier timeProvider) { @@ -1404,12 +1542,6 @@ public Builder setName(String name) { return this; } - public Builder setIndices(List indices) { - assert indices.isEmpty() == false : "Cannot create data stream with empty backing indices"; - this.indices = indices; - return this; - } - public Builder setGeneration(long generation) { this.generation = generation; return this; @@ -1450,30 +1582,34 @@ public Builder setLifecycle(DataStreamLifecycle lifecycle) { return this; } - public Builder setRolloverOnWrite(boolean rolloverOnWrite) { - this.rolloverOnWrite = rolloverOnWrite; + public Builder setFailureStoreEnabled(boolean failureStoreEnabled) { + this.failureStoreEnabled = failureStoreEnabled; return this; } - public Builder setFailureStoreEnabled(boolean failureStoreEnabled) { - this.failureStoreEnabled = failureStoreEnabled; + public Builder setBackingIndices(DataStreamIndices backingIndices) { + assert backingIndices.indices.isEmpty() == false : "Cannot create data stream with empty backing indices"; + this.backingIndices = 
backingIndices; return this; } - public Builder setFailureIndices(List failureIndices) { + public Builder setFailureIndices(DataStreamIndices failureIndices) { this.failureIndices = failureIndices; return this; } - public Builder setAutoShardingEvent(DataStreamAutoShardingEvent autoShardingEvent) { - this.autoShardingEvent = autoShardingEvent; + public Builder setDataStreamIndices(boolean targetFailureStore, DataStreamIndices indices) { + if (targetFailureStore) { + setFailureIndices(indices); + } else { + setBackingIndices(indices); + } return this; } public DataStream build() { return new DataStream( name, - indices, generation, metadata, hidden, @@ -1484,9 +1620,8 @@ public DataStream build() { indexMode, lifecycle, failureStoreEnabled, - failureIndices, - rolloverOnWrite, - autoShardingEvent + backingIndices, + failureIndices ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java index fef9ebe993a4d..c65f83eca0aa2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java @@ -89,7 +89,7 @@ public DataStreamMetadata( public DataStreamMetadata(StreamInput in) throws IOException { this( - in.readImmutableOpenMap(StreamInput::readString, DataStream::new), + in.readImmutableOpenMap(StreamInput::readString, DataStream::read), in.readImmutableOpenMap(StreamInput::readString, DataStreamAlias::new) ); } @@ -265,7 +265,7 @@ public String toString() { static class DataStreamMetadataDiff implements NamedDiff { private static final DiffableUtils.DiffableValueReader DS_DIFF_READER = new DiffableUtils.DiffableValueReader<>( - DataStream::new, + DataStream::read, DataStream::readDiffFrom ); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 678655252248f..681dcb3e314e3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -189,13 +189,17 @@ public void writeTo(StreamOutput out) throws IOException { } } + /** + * This is a safety limit that should only be exceeded in very rare and special cases. The assumption is that + * 99% of the users have less than 1024 shards per index. We also make it a hard check that requires restart of nodes + * if a cluster should allow to create more than 1024 shards per index. NOTE: this does not limit the number of shards + * per cluster. this also prevents creating stuff like a new index with millions of shards by accident which essentially + * kills the entire cluster with OOM on the spot. + */ + public static final String PER_INDEX_MAX_NUMBER_OF_SHARDS = "1024"; + static Setting buildNumberOfShardsSetting() { - /* This is a safety limit that should only be exceeded in very rare and special cases. The assumption is that - * 99% of the users have less than 1024 shards per index. We also make it a hard check that requires restart of nodes - * if a cluster should allow to create more than 1024 shards per index. NOTE: this does not limit the number of shards - * per cluster. 
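
For reference, the validation this Javadoc describes boils down to a few lines; a condensed sketch of the property handling (paraphrased from the hunk above, not a verbatim excerpt):

```java
// Condensed sketch of the es.index.max_number_of_shards handling described above.
static int resolveMaxShardsPerIndex() {
    // PER_INDEX_MAX_NUMBER_OF_SHARDS supplies the "1024" default; overriding it
    // requires setting the JVM property before the node starts.
    int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", "1024"));
    if (maxNumShards < 1) {
        throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0");
    }
    return maxNumShards;
}
```
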
this also prevents creating stuff like a new index with millions of shards by accident which essentially - * kills the entire cluster with OOM on the spot.*/ - final int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", "1024")); + final int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", PER_INDEX_MAX_NUMBER_OF_SHARDS)); if (maxNumShards < 1) { throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index effc89d8e535a..8bc8f9d96bf24 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -425,7 +425,7 @@ private static void resolveIndicesForDataStream(Context context, DataStream data if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { // We short-circuit here, if failure indices are not allowed and they can be skipped if (context.getOptions().allowFailureIndices() || context.getOptions().ignoreUnavailable() == false) { - for (Index index : dataStream.getFailureIndices()) { + for (Index index : dataStream.getFailureIndices().getIndices()) { if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { concreteIndicesResult.add(index); } @@ -470,7 +470,7 @@ private static boolean resolvesToMoreThanOneIndex(IndexAbstraction indexAbstract count += dataStream.getIndices().size(); } if (shouldIncludeFailureIndices(context.getOptions(), dataStream)) { - count += dataStream.getFailureIndices().size(); + count += dataStream.getFailureIndices().getIndices().size(); } return count > 1; } @@ -1431,7 +1431,7 @@ && shouldIncludeFailureIndices(context.getOptions(), (DataStream) indexAbstracti DataStream dataStream = (DataStream) indexAbstraction; indicesStateStream = Stream.concat( indicesStateStream, - dataStream.getFailureIndices().stream().map(context.state.metadata()::index) + dataStream.getFailureIndices().getIndices().stream().map(context.state.metadata()::index) ); } if (excludeState != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index fec209960597b..e25c12d0c2ad7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -2600,7 +2600,10 @@ private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexM || parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName())) || (DataStream.isFailureStoreFeatureFlagEnabled() && parent.isFailureStoreEnabled() - && parent.getFailureIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))) + && parent.getFailureIndices() + .getIndices() + .stream() + .anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))) : "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex(); return true; } @@ -2623,7 +2626,7 @@ private static void collectDataStreams( indexToDataStreamLookup.put(i.getName(), dataStream); } if (DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.isFailureStoreEnabled()) { - for (Index i : 
dataStream.getFailureIndices()) { + for (Index i : dataStream.getFailureIndices().getIndices()) { indexToDataStreamLookup.put(i.getName(), dataStream); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 2d1d38ac926d6..1062f741cf0bd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -111,8 +111,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { ); DataStream createdDataStream = clusterState.metadata().dataStreams().get(request.name); firstBackingIndexRef.set(createdDataStream.getIndices().get(0).getName()); - if (createdDataStream.getFailureIndices().isEmpty() == false) { - firstFailureStoreRef.set(createdDataStream.getFailureIndices().get(0).getName()); + if (createdDataStream.getFailureIndices().getIndices().isEmpty() == false) { + firstFailureStoreRef.set(createdDataStream.getFailureIndices().getIndices().get(0).getName()); } return clusterState; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index a018f3d93a9bc..7363e71d65c72 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -77,7 +77,12 @@ public Tuple executeTask( ClusterState clusterState ) { return new Tuple<>( - setRolloverOnWrite(clusterState, setRolloverOnWriteTask.getDataStreamName(), setRolloverOnWriteTask.rolloverOnWrite()), + setRolloverOnWrite( + clusterState, + setRolloverOnWriteTask.getDataStreamName(), + setRolloverOnWriteTask.rolloverOnWrite(), + setRolloverOnWriteTask.targetFailureStore() + ), setRolloverOnWriteTask ); } @@ -152,13 +157,14 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String public void setRolloverOnWrite( String dataStreamName, boolean rolloverOnWrite, + boolean targetFailureStore, TimeValue ackTimeout, TimeValue masterTimeout, ActionListener listener ) { setRolloverOnWriteTaskQueue.submitTask( "set-rollover-on-write", - new SetRolloverOnWriteTask(dataStreamName, rolloverOnWrite, ackTimeout, listener), + new SetRolloverOnWriteTask(dataStreamName, rolloverOnWrite, targetFailureStore, ackTimeout, listener), masterTimeout ); } @@ -230,16 +236,25 @@ ClusterState updateDataLifecycle(ClusterState currentState, List dataStr * @param currentState the initial cluster state * @param dataStreamName the name of the data stream to be updated * @param rolloverOnWrite the value of the flag + * @param targetFailureStore whether this rollover targets the failure store or the backing indices * @return the updated cluster state */ - public static ClusterState setRolloverOnWrite(ClusterState currentState, String dataStreamName, boolean rolloverOnWrite) { + public static ClusterState setRolloverOnWrite( + ClusterState currentState, + String dataStreamName, + boolean rolloverOnWrite, + boolean targetFailureStore + ) { Metadata metadata = currentState.metadata(); var dataStream = validateDataStream(metadata, dataStreamName); - if (dataStream.rolloverOnWrite() == rolloverOnWrite) { + var indices = dataStream.getDataStreamIndices(targetFailureStore); + if 
(indices.isRolloverOnWrite() == rolloverOnWrite) { return currentState; } Metadata.Builder builder = Metadata.builder(metadata); - builder.put(dataStream.copy().setRolloverOnWrite(rolloverOnWrite).build()); + builder.put( + dataStream.copy().setDataStreamIndices(targetFailureStore, indices.copy().setRolloverOnWrite(rolloverOnWrite).build()).build() + ); return ClusterState.builder(currentState).metadata(builder.build()).build(); } @@ -286,7 +301,7 @@ private static void removeBackingIndex( ) { boolean indexNotRemoved = true; DataStream dataStream = validateDataStream(metadata, dataStreamName); - List targetIndices = failureStore ? dataStream.getFailureIndices() : dataStream.getIndices(); + List targetIndices = failureStore ? dataStream.getFailureIndices().getIndices() : dataStream.getIndices(); for (Index backingIndex : targetIndices) { if (backingIndex.getName().equals(indexName)) { if (failureStore) { @@ -365,16 +380,19 @@ static class SetRolloverOnWriteTask extends AckedBatchedClusterStateUpdateTask { private final String dataStreamName; private final boolean rolloverOnWrite; + private final boolean targetFailureStore; SetRolloverOnWriteTask( String dataStreamName, boolean rolloverOnWrite, + boolean targetFailureStore, TimeValue ackTimeout, ActionListener listener ) { super(ackTimeout, listener); this.dataStreamName = dataStreamName; this.rolloverOnWrite = rolloverOnWrite; + this.targetFailureStore = targetFailureStore; } public String getDataStreamName() { @@ -384,5 +402,9 @@ public String getDataStreamName() { public boolean rolloverOnWrite() { return rolloverOnWrite; } + + public boolean targetFailureStore() { + return targetFailureStore; + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 631845dc33288..f5bb97af7625f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1864,7 +1864,7 @@ public static class PutRequest { CompressedXContent mappings = null; List aliases = new ArrayList<>(); - TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; + TimeValue masterTimeout = MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT; public PutRequest(String cause, String name) { this.cause = cause; @@ -1914,7 +1914,7 @@ public PutRequest version(Integer version) { public static class RemoveRequest { final String name; - TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; + TimeValue masterTimeout = MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT; public RemoveRequest(String name) { this.name = name; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java index ebdf6e4b3d8ee..3b1257a510747 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DataTier.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettingProvider; import 
org.elasticsearch.snapshots.SearchableSnapshotsSettings; @@ -223,14 +224,14 @@ public static class DefaultHotAllocationSettingProvider implements IndexSettingP @Override public Settings getAdditionalIndexSettings( String indexName, - String dataStreamName, - boolean timeSeries, + @Nullable String dataStreamName, + boolean isTimeSeries, Metadata metadata, Instant resolvedAt, - Settings allSettings, + Settings indexTemplateAndCreateRequestSettings, List combinedTemplateMappings ) { - Set settings = allSettings.keySet(); + Set settings = indexTemplateAndCreateRequestSettings.keySet(); if (settings.contains(TIER_PREFERENCE)) { // just a marker -- this null value will be removed or overridden by the template/request settings return NULL_TIER_PREFERENCE_SETTINGS; diff --git a/server/src/main/java/org/elasticsearch/common/time/CharSubSequence.java b/server/src/main/java/org/elasticsearch/common/time/CharSubSequence.java new file mode 100644 index 0000000000000..39dbb83bdf5a4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/CharSubSequence.java @@ -0,0 +1,68 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.time; + +import java.util.stream.IntStream; + +/** + * A CharSequence that provides a subsequence of another CharSequence without allocating a new backing array (as String does) + */ +class CharSubSequence implements CharSequence { + private final CharSequence wrapped; + private final int startOffset; // inclusive + private final int endOffset; // exclusive + + CharSubSequence(CharSequence wrapped, int startOffset, int endOffset) { + if (startOffset < 0) throw new IllegalArgumentException(); + if (endOffset > wrapped.length()) throw new IllegalArgumentException(); + if (endOffset < startOffset) throw new IllegalArgumentException(); + + this.wrapped = wrapped; + this.startOffset = startOffset; + this.endOffset = endOffset; + } + + @Override + public int length() { + return endOffset - startOffset; + } + + @Override + public char charAt(int index) { + int adjustedIndex = index + startOffset; + if (adjustedIndex < startOffset || adjustedIndex >= endOffset) throw new IndexOutOfBoundsException(index); + return wrapped.charAt(adjustedIndex); + } + + @Override + public boolean isEmpty() { + return startOffset == endOffset; + } + + @Override + public CharSequence subSequence(int start, int end) { + int adjustedStart = start + startOffset; + int adjustedEnd = end + startOffset; + if (adjustedStart < startOffset) throw new IndexOutOfBoundsException(start); + if (adjustedEnd > endOffset) throw new IndexOutOfBoundsException(end); + if (adjustedStart > adjustedEnd) throw new IndexOutOfBoundsException(); + + return wrapped.subSequence(adjustedStart, adjustedEnd); + } + + @Override + public IntStream chars() { + return wrapped.chars().skip(startOffset).limit(endOffset - startOffset); + } + + @Override + public String toString() { + return wrapped.subSequence(startOffset, endOffset).toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 7dae11fb8d720..1133eac3f8f7b 100644 --- 
a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -9,7 +9,10 @@ package org.elasticsearch.common.time; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Booleans; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.logging.internal.spi.LoggerFactory; import java.time.Instant; import java.time.LocalDate; @@ -30,6 +33,7 @@ import java.time.temporal.TemporalQuery; import java.time.temporal.WeekFields; import java.util.Locale; +import java.util.Set; import java.util.stream.Stream; import static java.time.temporal.ChronoField.DAY_OF_MONTH; @@ -43,6 +47,24 @@ public class DateFormatters { + /** + * The ISO8601 parser is as close as possible to the java.time based parsers, but there are some strings + * that are no longer accepted (multiple fractional seconds, or multiple timezones) by the ISO parser. + * If a string cannot be parsed by the ISO parser, it then tries the java.time one. + * If there's lots of these strings, trying the ISO parser, then the java.time parser, might cause a performance drop. + * So provide a JVM option so that users can just use the java.time parsers, if they really need to. + */ + @UpdateForV9 // evaluate if we need to deprecate/remove this + private static final boolean JAVA_TIME_PARSERS_ONLY = Booleans.parseBoolean(System.getProperty("es.datetime.java_time_parsers"), false); + + static { + // when this is used directly in tests ES logging may not have been initialized yet + LoggerFactory logger; + if (JAVA_TIME_PARSERS_ONLY && (logger = LoggerFactory.provider()) != null) { + logger.getLogger(DateFormatters.class).info("Using java.time datetime parsers only"); + } + } + private static DateFormatter newDateFormatter(String format, DateTimeFormatter formatter) { return new JavaDateFormatter(format, new JavaTimeDateTimePrinter(formatter), new JavaTimeDateTimeParser(formatter)); } @@ -168,11 +190,18 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional. */ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME = newDateFormatter( - "strict_date_optional_time", - STRICT_DATE_OPTIONAL_TIME_PRINTER, - STRICT_DATE_OPTIONAL_TIME_FORMATTER - ); + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME; + static { + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(STRICT_DATE_OPTIONAL_TIME_FORMATTER); + + STRICT_DATE_OPTIONAL_TIME = new JavaDateFormatter( + "strict_date_optional_time", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + JAVA_TIME_PARSERS_ONLY + ? new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { new Iso8601DateTimeParser(Set.of(), false).withLocale(Locale.ROOT), javaTimeParser } + ); + } private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS = new DateTimeFormatterBuilder().append( STRICT_YEAR_MONTH_DAY_FORMATTER @@ -224,51 +253,69 @@ private static DateFormatter newDateFormatter(String format, DateTimeFormatter p /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. 
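
The formatter definitions in this file all share one pattern: a two-stage parser chain that tries the hand-written ISO-8601 parser first and only falls back to the java.time formatter when the fast parser rejects the input. A generic sketch of that idea, under the assumption that the fast parser signals rejection via an empty Optional rather than an exception (names here are illustrative; the real chaining happens inside JavaDateFormatter):

```java
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.Optional;
import java.util.function.Function;

final class FallbackChainSketch {
    // The common case never pays for exception construction: the fast parser
    // returns Optional.empty() on strings it cannot handle, and only then do
    // we run the slower, more lenient java.time formatter.
    static TemporalAccessor parse(
        CharSequence input,
        Function<CharSequence, Optional<TemporalAccessor>> fastParser,
        DateTimeFormatter javaTimeFallback
    ) {
        return fastParser.apply(input).orElseGet(() -> javaTimeFallback.parse(input));
    }
}
```
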
*/ - private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = newDateFormatter( - "strict_date_optional_time_nanos", - STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS, - STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS - ); + private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS; + static { + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser(STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); + + STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter( + "strict_date_optional_time_nanos", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS), + JAVA_TIME_PARSERS_ONLY + ? new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { + new Iso8601DateTimeParser(Set.of(HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), true).withLocale(Locale.ROOT), + javaTimeParser } + ); + } /** * Returns a ISO 8601 compatible date time formatter and parser. * This is not fully compatible to the existing spec, which would require far more edge cases, but merely compatible with the * existing legacy joda time ISO date formatter */ - private static final DateFormatter ISO_8601 = newDateFormatter( - "iso8601", - STRICT_DATE_OPTIONAL_TIME_PRINTER, - new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) - .optionalStart() - .appendLiteral('T') - .optionalStart() - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 9, true) - .optionalEnd() - .optionalStart() - .appendLiteral(",") - .appendFraction(NANO_OF_SECOND, 1, 9, false) - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalEnd() - .optionalStart() - .appendZoneOrOffsetId() - .optionalEnd() - .optionalStart() - .append(TIME_ZONE_FORMATTER_NO_COLON) - .optionalEnd() - .optionalEnd() - .toFormatter(Locale.ROOT) - .withResolverStyle(ResolverStyle.STRICT) - ); + private static final DateFormatter ISO_8601; + static { + DateTimeParser javaTimeParser = new JavaTimeDateTimeParser( + new DateTimeFormatterBuilder().append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .optionalStart() + .appendLiteral('T') + .optionalStart() + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .optionalEnd() + .optionalStart() + .appendLiteral(",") + .appendFraction(NANO_OF_SECOND, 1, 9, false) + .optionalEnd() + .optionalEnd() + .optionalEnd() + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalStart() + .append(TIME_ZONE_FORMATTER_NO_COLON) + .optionalEnd() + .optionalEnd() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT) + ); + + ISO_8601 = new JavaDateFormatter( + "iso8601", + new JavaTimeDateTimePrinter(STRICT_DATE_OPTIONAL_TIME_PRINTER), + JAVA_TIME_PARSERS_ONLY + ? 
new DateTimeParser[] { javaTimeParser } + : new DateTimeParser[] { new Iso8601DateTimeParser(Set.of(), false).withLocale(Locale.ROOT), javaTimeParser } + ); + } ///////////////////////////////////////// // diff --git a/server/src/main/java/org/elasticsearch/common/time/DateTime.java b/server/src/main/java/org/elasticsearch/common/time/DateTime.java new file mode 100644 index 0000000000000..101389b43d9fc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/DateTime.java @@ -0,0 +1,150 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.time; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.time.temporal.TemporalField; +import java.time.temporal.TemporalQueries; +import java.time.temporal.TemporalQuery; +import java.time.temporal.UnsupportedTemporalTypeException; + +/** + * Provides information on a parsed datetime + */ +record DateTime( + int years, + Integer months, + Integer days, + Integer hours, + Integer minutes, + Integer seconds, + Integer nanos, + ZoneId zoneId, + ZoneOffset offset +) implements TemporalAccessor { + + @Override + @SuppressWarnings("unchecked") + public R query(TemporalQuery query) { + // shortcut a few queries used by DateFormatters.from + if (query == TemporalQueries.zoneId()) { + return (R) zoneId; + } + if (query == TemporalQueries.offset()) { + return (R) offset; + } + if (query == DateFormatters.LOCAL_DATE_QUERY || query == TemporalQueries.localDate()) { + if (months != null && days != null) { + return (R) LocalDate.of(years, months, days); + } + return null; + } + if (query == TemporalQueries.localTime()) { + if (hours != null && minutes != null && seconds != null) { + return (R) LocalTime.of(hours, minutes, seconds, nanos != null ? 
nanos : 0); + } + return null; + } + return TemporalAccessor.super.query(query); + } + + @Override + public boolean isSupported(TemporalField field) { + if (field instanceof ChronoField f) { + return switch (f) { + case YEAR -> true; + case MONTH_OF_YEAR -> months != null; + case DAY_OF_MONTH -> days != null; + case HOUR_OF_DAY -> hours != null; + case MINUTE_OF_HOUR -> minutes != null; + case SECOND_OF_MINUTE -> seconds != null; + case INSTANT_SECONDS -> months != null && days != null && hours != null && minutes != null && seconds != null; + // if the time components are there, we just default nanos to 0 if it's not present + case SECOND_OF_DAY, NANO_OF_SECOND, NANO_OF_DAY -> hours != null && minutes != null && seconds != null; + case OFFSET_SECONDS -> offset != null; + default -> false; + }; + } + + return field.isSupportedBy(this); + } + + @Override + public long getLong(TemporalField field) { + if (field instanceof ChronoField f) { + switch (f) { + case YEAR -> { + return years; + } + case MONTH_OF_YEAR -> { + return extractValue(f, months); + } + case DAY_OF_MONTH -> { + return extractValue(f, days); + } + case HOUR_OF_DAY -> { + return extractValue(f, hours); + } + case MINUTE_OF_HOUR -> { + return extractValue(f, minutes); + } + case SECOND_OF_MINUTE -> { + return extractValue(f, seconds); + } + case INSTANT_SECONDS -> { + if (isSupported(ChronoField.INSTANT_SECONDS) == false) { + throw new UnsupportedTemporalTypeException("No " + f + " value available"); + } + return LocalDateTime.of(years, months, days, hours, minutes, seconds) + .toEpochSecond(offset != null ? offset : ZoneOffset.UTC); + } + case SECOND_OF_DAY -> { + if (isSupported(ChronoField.SECOND_OF_DAY) == false) { + throw new UnsupportedTemporalTypeException("No " + f + " value available"); + } + return LocalTime.of(hours, minutes, seconds).toSecondOfDay(); + } + case NANO_OF_SECOND -> { + if (isSupported(ChronoField.NANO_OF_SECOND) == false) { + throw new UnsupportedTemporalTypeException("No " + f + " value available"); + } + return nanos != null ? nanos.longValue() : 0L; + } + case NANO_OF_DAY -> { + if (isSupported(ChronoField.NANO_OF_DAY) == false) { + throw new UnsupportedTemporalTypeException("No " + f + " value available"); + } + return LocalTime.of(hours, minutes, seconds, nanos != null ? nanos : 0).toNanoOfDay(); + } + case OFFSET_SECONDS -> { + if (offset == null) { + throw new UnsupportedTemporalTypeException("No " + f + " value available"); + } + return offset.getTotalSeconds(); + } + default -> throw new UnsupportedTemporalTypeException("No " + f + " value available"); + } + } + + return field.getFrom(this); + } + + private static long extractValue(ChronoField field, Number value) { + if (value == null) { + throw new UnsupportedTemporalTypeException("No " + field + " value available"); + } + return value.longValue(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/time/Iso8601DateTimeParser.java b/server/src/main/java/org/elasticsearch/common/time/Iso8601DateTimeParser.java new file mode 100644 index 0000000000000..2a526a36408ce --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/Iso8601DateTimeParser.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.time; + +import java.time.ZoneId; +import java.time.format.DateTimeParseException; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +class Iso8601DateTimeParser implements DateTimeParser { + + private final Iso8601Parser parser; + private final ZoneId timezone; + // the locale doesn't actually matter, as we're parsing in a standardised format + // and we already account for . or , in decimals + private final Locale locale; + + Iso8601DateTimeParser(Set mandatoryFields, boolean optionalTime) { + parser = new Iso8601Parser(mandatoryFields, optionalTime, Map.of()); + timezone = null; + locale = null; + } + + private Iso8601DateTimeParser(Iso8601Parser parser, ZoneId timezone, Locale locale) { + this.parser = parser; + this.timezone = timezone; + this.locale = locale; + } + + @Override + public ZoneId getZone() { + return timezone; + } + + @Override + public Locale getLocale() { + return locale; + } + + @Override + public DateTimeParser withZone(ZoneId zone) { + return new Iso8601DateTimeParser(parser, zone, locale); + } + + @Override + public DateTimeParser withLocale(Locale locale) { + return new Iso8601DateTimeParser(parser, timezone, locale); + } + + Iso8601DateTimeParser withDefaults(Map defaults) { + return new Iso8601DateTimeParser(new Iso8601Parser(parser.mandatoryFields(), parser.optionalTime(), defaults), timezone, locale); + } + + @Override + public TemporalAccessor parse(CharSequence str) { + var result = parser.tryParse(str, timezone); + var temporal = result.result(); + if (temporal == null) { + throw new DateTimeParseException("Could not fully parse datetime", str, result.errorIndex()); + } + return temporal; + } + + @Override + public Optional tryParse(CharSequence str) { + return Optional.ofNullable(parser.tryParse(str, timezone).result()); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java b/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java new file mode 100644 index 0000000000000..4f1d131dd8ced --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/time/Iso8601Parser.java @@ -0,0 +1,521 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.time; + +import org.elasticsearch.core.Nullable; + +import java.time.DateTimeException; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.temporal.ChronoField; +import java.util.EnumMap; +import java.util.EnumSet; +import java.util.Map; +import java.util.Set; + +/** + * Parses datetimes in ISO8601 format (and subsequences thereof). + *
<p>
+ * This is faster than the generic parsing in {@link java.time.format.DateTimeFormatter}, as this is hard-coded and specific to ISO-8601.
+ * Various public libraries provide their own variant of this mechanism. We use our own for a few reasons:
+ * <ul>
+ *     <li>
+ *         We are historically a bit more lenient with strings that are invalid according to the strict specification
+ *         (eg using a zone region instead of offset for timezone)
+ *     </li>
+ *     <li>Various built-in formats specify some fields as mandatory and some as optional</li>
+ *     <li>Callers can specify defaults for fields that are not present (eg for roundup parsers)</li>
+ * </ul>
+ * We also do not use exceptions here, instead returning {@code null} for any invalid values, that are then + * checked and propagated as appropriate. + */ +class Iso8601Parser { + + /** + * The result of the parse. If successful, {@code result} will be non-null. + * If parse failed, {@code errorIndex} specifies the index into the parsed string + * that the first invalid data was encountered. + */ + record Result(@Nullable DateTime result, int errorIndex) { + Result(DateTime result) { + this(result, -1); + } + + static Result error(int errorIndex) { + return new Result(null, errorIndex); + } + } + + private static final Set VALID_MANDATORY_FIELDS = EnumSet.of( + ChronoField.YEAR, + ChronoField.MONTH_OF_YEAR, + ChronoField.DAY_OF_MONTH, + ChronoField.HOUR_OF_DAY, + ChronoField.MINUTE_OF_HOUR, + ChronoField.SECOND_OF_MINUTE + ); + + private static final Set VALID_DEFAULT_FIELDS = EnumSet.of( + ChronoField.MONTH_OF_YEAR, + ChronoField.DAY_OF_MONTH, + ChronoField.HOUR_OF_DAY, + ChronoField.MINUTE_OF_HOUR, + ChronoField.SECOND_OF_MINUTE, + ChronoField.NANO_OF_SECOND + ); + + private final Set mandatoryFields; + private final boolean optionalTime; + private final Map defaults; + + /** + * Constructs a new {@code Iso8601Parser} object + * + * @param mandatoryFields + * The set of fields that must be present for a valid parse. These should be specified in field order + * (eg if {@link ChronoField#DAY_OF_MONTH} is specified, {@link ChronoField#MONTH_OF_YEAR} should also be specified). + * {@link ChronoField#YEAR} is always mandatory. + * @param optionalTime + * {@code false} if the presence of time fields follows {@code mandatoryFields}, + * {@code true} if a time component is always optional, despite the presence of time fields in {@code mandatoryFields}. + * This makes it possible to specify 'time is optional, but if it is present, it must have these fields' + * by settings {@code optionalTime = true} and putting time fields such as {@link ChronoField#HOUR_OF_DAY} + * and {@link ChronoField#MINUTE_OF_HOUR} in {@code mandatoryFields}. + * @param defaults + * Map of default field values, if they are not present in the parsed string. + */ + Iso8601Parser(Set mandatoryFields, boolean optionalTime, Map defaults) { + checkChronoFields(mandatoryFields, VALID_MANDATORY_FIELDS); + checkChronoFields(defaults.keySet(), VALID_DEFAULT_FIELDS); + + this.mandatoryFields = EnumSet.of(ChronoField.YEAR); // year is always mandatory + this.mandatoryFields.addAll(mandatoryFields); + this.optionalTime = optionalTime; + this.defaults = defaults.isEmpty() ? Map.of() : new EnumMap<>(defaults); + } + + private static void checkChronoFields(Set fields, Set validFields) { + if (fields.isEmpty()) return; // nothing to check + + fields = EnumSet.copyOf(fields); + fields.removeAll(validFields); + if (fields.isEmpty() == false) { + throw new IllegalArgumentException("Invalid chrono fields specified " + fields); + } + } + + boolean optionalTime() { + return optionalTime; + } + + Set mandatoryFields() { + return mandatoryFields; + } + + private boolean isOptional(ChronoField field) { + return mandatoryFields.contains(field) == false; + } + + private Integer defaultZero(ChronoField field) { + return defaults.getOrDefault(field, 0); + } + + /** + * Attempts to parse {@code str} as an ISO-8601 datetime, returning a {@link Result} indicating if the parse + * was successful or not, and what fields were present. 
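
To make the exception-free contract concrete, a caller inside the same package might consume Result along these lines (a usage sketch, assuming package-private access; this is not code from the PR):

```java
import java.time.temporal.TemporalAccessor;
import java.util.Map;
import java.util.Set;

// Hypothetical helper showing how Result separates success from failure
// without throwing: a null result() means errorIndex() points at the first
// character that could not be parsed.
static TemporalAccessor parseOrReport(String input) {
    Iso8601Parser parser = new Iso8601Parser(Set.of(), true, Map.of());
    Iso8601Parser.Result result = parser.tryParse(input, null);
    if (result.result() == null) {
        throw new IllegalArgumentException("invalid ISO-8601 datetime at index " + result.errorIndex());
    }
    return result.result();
}
```
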
+ * @param str The string to parse + * @param defaultTimezone The default timezone to return, if no timezone is present in the string + * @return The {@link Result} of the parse. + */ + Result tryParse(CharSequence str, @Nullable ZoneId defaultTimezone) { + if (str.charAt(0) == '-') { + // the year is negative. This is most unusual. + // Instead of always adding offsets and dynamically calculating position in the main parser code below, + // just in case it starts with a -, just parse the substring, then adjust the output appropriately + Result result = parse(new CharSubSequence(str, 1, str.length()), defaultTimezone); + + if (result.errorIndex() >= 0) { + return Result.error(result.errorIndex() + 1); + } else { + DateTime dt = result.result(); + return new Result( + new DateTime( + -dt.years(), + dt.months(), + dt.days(), + dt.hours(), + dt.minutes(), + dt.seconds(), + dt.nanos(), + dt.zoneId(), + dt.offset() + ) + ); + } + } else { + return parse(str, defaultTimezone); + } + } + + /** + * Index {@code i} is the multiplicand to get the number of nanos from the fractional second with {@code i=9-d} digits. + */ + private static final int[] NANO_MULTIPLICANDS = new int[] { 1, 10, 100, 1_000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000 }; + + /** + * Parses {@code str} in ISO8601 format. + *
<p>
+ * This parses the string using fixed offsets (it does not support variable-width fields) and separators, + * sequentially parsing each field and looking for the correct separator. + * This enables it to be very fast, as all the fields are in fixed places in the string. + * The only variable aspect comes from the timezone, which (fortunately) is only present at the end of the string, + * at any point after a time field. + * It also does not use exceptions, instead returning {@code null} where a value cannot be parsed. + */ + private Result parse(CharSequence str, @Nullable ZoneId defaultTimezone) { + int len = str.length(); + + // YEARS + Integer years = parseInt(str, 0, 4); + if (years == null) return Result.error(0); + if (len == 4) { + return isOptional(ChronoField.MONTH_OF_YEAR) + ? new Result( + withZoneOffset( + years, + defaults.get(ChronoField.MONTH_OF_YEAR), + defaults.get(ChronoField.DAY_OF_MONTH), + defaults.get(ChronoField.HOUR_OF_DAY), + defaults.get(ChronoField.MINUTE_OF_HOUR), + defaults.get(ChronoField.SECOND_OF_MINUTE), + defaults.get(ChronoField.NANO_OF_SECOND), + defaultTimezone + ) + ) + : Result.error(4); + } + + if (str.charAt(4) != '-') return Result.error(4); + + // MONTHS + Integer months = parseInt(str, 5, 7); + if (months == null || months > 12) return Result.error(5); + if (len == 7) { + return isOptional(ChronoField.DAY_OF_MONTH) + ? new Result( + withZoneOffset( + years, + months, + defaults.get(ChronoField.DAY_OF_MONTH), + defaults.get(ChronoField.HOUR_OF_DAY), + defaults.get(ChronoField.MINUTE_OF_HOUR), + defaults.get(ChronoField.SECOND_OF_MINUTE), + defaults.get(ChronoField.NANO_OF_SECOND), + defaultTimezone + ) + ) + : Result.error(7); + } + + if (str.charAt(7) != '-') return Result.error(7); + + // DAYS + Integer days = parseInt(str, 8, 10); + if (days == null || days > 31) return Result.error(8); + if (len == 10) { + return optionalTime || isOptional(ChronoField.HOUR_OF_DAY) + ? new Result( + withZoneOffset( + years, + months, + days, + defaults.get(ChronoField.HOUR_OF_DAY), + defaults.get(ChronoField.MINUTE_OF_HOUR), + defaults.get(ChronoField.SECOND_OF_MINUTE), + defaults.get(ChronoField.NANO_OF_SECOND), + defaultTimezone + ) + ) + : Result.error(10); + } + + if (str.charAt(10) != 'T') return Result.error(10); + if (len == 11) { + return isOptional(ChronoField.HOUR_OF_DAY) + ? new Result( + withZoneOffset( + years, + months, + days, + defaults.get(ChronoField.HOUR_OF_DAY), + defaults.get(ChronoField.MINUTE_OF_HOUR), + defaults.get(ChronoField.SECOND_OF_MINUTE), + defaults.get(ChronoField.NANO_OF_SECOND), + defaultTimezone + ) + ) + : Result.error(11); + } + + // HOURS + timezone + Integer hours = parseInt(str, 11, 13); + if (hours == null || hours > 23) return Result.error(11); + if (len == 13) { + return isOptional(ChronoField.MINUTE_OF_HOUR) + ? new Result( + withZoneOffset( + years, + months, + days, + hours, + defaultZero(ChronoField.MINUTE_OF_HOUR), + defaultZero(ChronoField.SECOND_OF_MINUTE), + defaultZero(ChronoField.NANO_OF_SECOND), + defaultTimezone + ) + ) + : Result.error(13); + } + if (isZoneId(str, 13)) { + ZoneId timezone = parseZoneId(str, 13); + return timezone != null && isOptional(ChronoField.MINUTE_OF_HOUR) + ? 
new Result( + withZoneOffset( + years, + months, + days, + hours, + defaultZero(ChronoField.MINUTE_OF_HOUR), + defaultZero(ChronoField.SECOND_OF_MINUTE), + defaultZero(ChronoField.NANO_OF_SECOND), + timezone + ) + ) + : Result.error(13); + } + + if (str.charAt(13) != ':') return Result.error(13); + + // MINUTES + timezone + Integer minutes = parseInt(str, 14, 16); + if (minutes == null || minutes > 59) return Result.error(14); + if (len == 16) { + return isOptional(ChronoField.SECOND_OF_MINUTE) + ? new Result( + withZoneOffset( + years, + months, + days, + hours, + minutes, + defaultZero(ChronoField.SECOND_OF_MINUTE), + defaultZero(ChronoField.NANO_OF_SECOND), + defaultTimezone + ) + ) + : Result.error(16); + } + if (isZoneId(str, 16)) { + ZoneId timezone = parseZoneId(str, 16); + return timezone != null && isOptional(ChronoField.SECOND_OF_MINUTE) + ? new Result( + withZoneOffset( + years, + months, + days, + hours, + minutes, + defaultZero(ChronoField.SECOND_OF_MINUTE), + defaultZero(ChronoField.NANO_OF_SECOND), + timezone + ) + ) + : Result.error(16); + } + + if (str.charAt(16) != ':') return Result.error(16); + + // SECONDS + timezone + Integer seconds = parseInt(str, 17, 19); + if (seconds == null || seconds > 59) return Result.error(17); + if (len == 19) { + return new Result( + withZoneOffset(years, months, days, hours, minutes, seconds, defaultZero(ChronoField.NANO_OF_SECOND), defaultTimezone) + ); + } + if (isZoneId(str, 19)) { + ZoneId timezone = parseZoneId(str, 19); + return timezone != null + ? new Result( + withZoneOffset(years, months, days, hours, minutes, seconds, defaultZero(ChronoField.NANO_OF_SECOND), timezone) + ) + : Result.error(19); + } + + char decSeparator = str.charAt(19); + if (decSeparator != '.' && decSeparator != ',') return Result.error(19); + + // NANOS + timezone + // nanos are always optional + // the last number could be millis or nanos, or any combination in the middle + // so we keep parsing numbers until we get to not a number + int nanos = 0; + int pos; + for (pos = 20; pos < len && pos < 29; pos++) { + char c = str.charAt(pos); + if (c < ZERO || c > NINE) break; + nanos = nanos * 10 + (c - ZERO); + } + + if (pos == 20) return Result.error(20); // didn't find a number at all + + // multiply it by the correct multiplicand to get the nanos + nanos *= NANO_MULTIPLICANDS[29 - pos]; + + if (len == pos) { + return new Result(withZoneOffset(years, months, days, hours, minutes, seconds, nanos, defaultTimezone)); + } + if (isZoneId(str, pos)) { + ZoneId timezone = parseZoneId(str, pos); + return timezone != null + ? new Result(withZoneOffset(years, months, days, hours, minutes, seconds, nanos, timezone)) + : Result.error(pos); + } + + // still chars left at the end - string is not valid + return Result.error(pos); + } + + private static boolean isZoneId(CharSequence str, int pos) { + // all region zoneIds must start with [A-Za-z] (see ZoneId#of) + // this also covers Z and UT/UTC/GMT zone variants + char c = str.charAt(pos); + return c == '+' || c == '-' || (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); + } + + /** + * This parses the zone offset, which is of the format accepted by {@link java.time.ZoneId#of(String)}. + * It has fast paths for numerical offsets, but falls back on {@code ZoneId.of} for non-trivial zone ids. 
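+ * For example (an illustrative, not exhaustive, list): {@code Z}, {@code +05}, {@code +05:30}, {@code +0530} and
+ * {@code +05:30:15} all take the fast path; ids such as {@code UTC} or {@code Europe/London} fall through to
+ * {@code ZoneId.of}; mixed separators such as {@code +05:3015} are rejected.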
+ */
+ private ZoneId parseZoneId(CharSequence str, int pos) {
+     int len = str.length();
+     char first = str.charAt(pos);
+
+     if (first == 'Z' && len == pos + 1) {
+         return ZoneOffset.UTC;
+     }
+
+     boolean positive;
+     switch (first) {
+         case '+' -> positive = true;
+         case '-' -> positive = false;
+         default -> {
+             // non-trivial zone offset, fall back on the built-in java zoneid parser
+             try {
+                 return ZoneId.of(str.subSequence(pos, str.length()).toString());
+             } catch (DateTimeException e) {
+                 return null;
+             }
+         }
+     }
+     pos++; // read the + or -
+
+     Integer hours = parseInt(str, pos, pos += 2);
+     if (hours == null) return null;
+     if (len == pos) return ofHoursMinutesSeconds(hours, 0, 0, positive);
+
+     boolean hasColon = false;
+     if (str.charAt(pos) == ':') {
+         pos++;
+         hasColon = true;
+     }
+
+     Integer minutes = parseInt(str, pos, pos += 2);
+     if (minutes == null) return null;
+     if (len == pos) return ofHoursMinutesSeconds(hours, minutes, 0, positive);
+
+     // either both dividers have a colon, or neither do
+     if ((str.charAt(pos) == ':') != hasColon) return null;
+     if (hasColon) {
+         pos++;
+     }
+
+     Integer seconds = parseInt(str, pos, pos += 2);
+     if (seconds == null) return null;
+     if (len == pos) return ofHoursMinutesSeconds(hours, minutes, seconds, positive);
+
+     // there's some text left over...
+     return null;
+ }
+
+ /*
+  * ZoneOffset.ofTotalSeconds has a ConcurrentHashMap cache of offsets. This is fine,
+  * but it does mean there's an expensive map lookup every time we call ofTotalSeconds.
+  * There's no way to get round that, but we can at least have a very quick last-value cache here
+  * to avoid doing a full map lookup when there's lots of timestamps with the same offset being parsed
+  */
+ private final ThreadLocal<ZoneOffset> lastOffset = ThreadLocal.withInitial(() -> ZoneOffset.UTC);
+
+ private ZoneOffset ofHoursMinutesSeconds(int hours, int minutes, int seconds, boolean positive) {
+     int totalSeconds = hours * 3600 + minutes * 60 + seconds;
+     if (positive == false) {
+         totalSeconds = -totalSeconds;
+     }
+
+     // check the lastOffset value
+     ZoneOffset lastOffset = this.lastOffset.get();
+     if (totalSeconds == lastOffset.getTotalSeconds()) {
+         return lastOffset;
+     }
+
+     try {
+         ZoneOffset offset = ZoneOffset.ofTotalSeconds(totalSeconds);
+         // cache the freshly computed offset (not the stale previous value) for the next lookup
+         this.lastOffset.set(offset);
+         return offset;
+     } catch (DateTimeException e) {
+         // zoneoffset is out of range
+         return null;
+     }
+ }
+
+ /**
+  * Create a {@code DateTime} object, with the ZoneOffset field set when the zone is an offset, not just an id.
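+  * For example (illustrative): a {@code +01:00} or {@code Z} suffix parses to a {@code ZoneOffset}, so both the
+  * zoneId and offset fields are set, while a region id such as {@code Europe/Paris} is not a {@code ZoneOffset}
+  * and leaves the offset field {@code null}.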
+ */ + private static DateTime withZoneOffset( + int years, + Integer months, + Integer days, + Integer hours, + Integer minutes, + Integer seconds, + Integer nanos, + ZoneId zoneId + ) { + if (zoneId instanceof ZoneOffset zo) { + return new DateTime(years, months, days, hours, minutes, seconds, nanos, zoneId, zo); + } else { + return new DateTime(years, months, days, hours, minutes, seconds, nanos, zoneId, null); + } + } + + private static final char ZERO = '0'; + private static final char NINE = '9'; + + private static Integer parseInt(CharSequence str, int startInclusive, int endExclusive) { + if (str.length() < endExclusive) return null; + + int result = 0; + for (int i = startInclusive; i < endExclusive; i++) { + char c = str.charAt(i); + if (c < ZERO || c > NINE) return null; + result = result * 10 + (c - ZERO); + } + return result; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index 9c39ee51276d7..707b07c1d68d9 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -21,15 +21,21 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Objects; import java.util.function.UnaryOperator; +import static java.util.Map.entry; + class JavaDateFormatter implements DateFormatter { @SuppressWarnings("unchecked") private static T defaultRoundUp(T parser) { if (parser instanceof JavaTimeDateTimeParser jtp) { return (T) defaultRoundUp(jtp); } + if (parser instanceof Iso8601DateTimeParser iso) { + return (T) defaultRoundUp(iso); + } throw new IllegalArgumentException("Unknown parser implementation " + parser.getClass()); } @@ -78,6 +84,19 @@ private static JavaTimeDateTimeParser defaultRoundUp(JavaTimeDateTimeParser pars return new JavaTimeDateTimeParser(builder.toFormatter(parser.getLocale())); } + private static Iso8601DateTimeParser defaultRoundUp(Iso8601DateTimeParser parser) { + return parser.withDefaults( + Map.ofEntries( + entry(ChronoField.MONTH_OF_YEAR, 1), + entry(ChronoField.DAY_OF_MONTH, 1), + entry(ChronoField.HOUR_OF_DAY, 23), + entry(ChronoField.MINUTE_OF_HOUR, 59), + entry(ChronoField.SECOND_OF_MINUTE, 59), + entry(ChronoField.NANO_OF_SECOND, 999_999_999) + ) + ); + } + private final String format; private final DateTimePrinter printer; private final DateTimeParser[] parsers; diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java index e67196c9090c9..bbf7cc3e0e1e9 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.MapperService; import java.io.IOException; @@ -31,20 +32,20 @@ public interface IndexSettingProvider { * @param indexName The name of the new index being created * @param dataStreamName The name of the data stream if the index being created is part of a data stream otherwise * null - * @param timeSeries Whether the template is in time series mode. + * @param isTimeSeries Whether the template is in time series mode. 
* @param metadata The current metadata instance that doesn't yet contain the index to be created * @param resolvedAt The time the request to create this new index was accepted. - * @param allSettings All the setting resolved from the template that matches and any setting defined on the create index - * request + * @param indexTemplateAndCreateRequestSettings All the settings resolved from the template that matches and any settings + * defined on the create index request * @param combinedTemplateMappings All the mappings resolved from the template that matches */ Settings getAdditionalIndexSettings( String indexName, - String dataStreamName, - boolean timeSeries, + @Nullable String dataStreamName, + boolean isTimeSeries, Metadata metadata, Instant resolvedAt, - Settings allSettings, + Settings indexTemplateAndCreateRequestSettings, List combinedTemplateMappings ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java index 0e6f117266e35..c3eb0c4c0290a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryDocValuesSyntheticFieldLoader.java @@ -58,4 +58,9 @@ public void write(XContentBuilder b) throws IOException { writeValue(b, values.binaryValue()); } + + @Override + public String fieldName() { + return name; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java index af341e64661d1..a7283cf0a28ec 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java @@ -178,5 +178,10 @@ public void write(XContentBuilder b) throws IOException { } b.field(NAME, postings.freq()); } + + @Override + public String fieldName() { + return NAME; + } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 9476c3e719e0b..08421af332fe4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -838,7 +838,15 @@ protected String contentType() { private static class NoOpObjectMapper extends ObjectMapper { NoOpObjectMapper(String name, String fullPath) { - super(name, fullPath, Explicit.IMPLICIT_TRUE, Explicit.IMPLICIT_TRUE, Dynamic.RUNTIME, Collections.emptyMap()); + super( + name, + fullPath, + Explicit.IMPLICIT_TRUE, + Explicit.IMPLICIT_TRUE, + Explicit.IMPLICIT_FALSE, + Dynamic.RUNTIME, + Collections.emptyMap() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index b5de3971fa091..acfe0fcfbf5bd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -21,6 +21,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.Map; +import java.util.stream.Stream; /** * Wrapper around everything that defines a mapping, without references to @@ -125,7 +126,8 @@ private boolean isSourceSynthetic() { } public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return 
root.syntheticFieldLoader(Arrays.stream(metadataMappers)); + var stream = Stream.concat(Stream.of(metadataMappers), root.mappers.values().stream()); + return root.syntheticFieldLoader(stream); } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java index 5c2880a4bf760..a8955e46f0ad4 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedObjectMapper.java @@ -149,7 +149,7 @@ public MapperBuilderContext createChildContext(String name, Dynamic dynamic) { String nestedTypePath, Query nestedTypeFilter ) { - super(name, fullPath, enabled, Explicit.IMPLICIT_TRUE, dynamic, mappers); + super(name, fullPath, enabled, Explicit.IMPLICIT_TRUE, Explicit.IMPLICIT_FALSE, dynamic, mappers); this.nestedTypePath = nestedTypePath; this.nestedTypeFilter = nestedTypeFilter; this.includeInParent = includeInParent; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 6d5a43ae41bd0..6336e6ca0b764 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -37,10 +37,12 @@ public class ObjectMapper extends Mapper { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ObjectMapper.class); public static final String CONTENT_TYPE = "object"; + static final String STORE_ARRAY_SOURCE_PARAM = "store_array_source"; public static class Defaults { public static final boolean ENABLED = true; public static final Explicit SUBOBJECTS = Explicit.IMPLICIT_TRUE; + public static final Explicit TRACK_ARRAY_SOURCE = Explicit.IMPLICIT_FALSE; public static final Dynamic DYNAMIC = Dynamic.TRUE; } @@ -78,6 +80,7 @@ static Dynamic getRootDynamic(MappingLookup mappingLookup) { public static class Builder extends Mapper.Builder { protected final Explicit subobjects; protected Explicit enabled = Explicit.IMPLICIT_TRUE; + protected Explicit trackArraySource = Defaults.TRACK_ARRAY_SOURCE; protected Dynamic dynamic; protected final List mappersBuilders = new ArrayList<>(); @@ -91,6 +94,11 @@ public Builder enabled(boolean enabled) { return this; } + public Builder trackArraySource(boolean value) { + this.trackArraySource = Explicit.explicitBoolean(value); + return this; + } + public Builder dynamic(Dynamic dynamic) { this.dynamic = dynamic; return this; @@ -182,6 +190,7 @@ public ObjectMapper build(MapperBuilderContext context) { context.buildFullName(name()), enabled, subobjects, + trackArraySource, dynamic, buildMappers(context.createChildContext(name(), dynamic)) ); @@ -242,6 +251,9 @@ protected static boolean parseObjectOrDocumentTypeProperties( } else if (fieldName.equals("enabled")) { builder.enabled(XContentMapValues.nodeBooleanValue(fieldNode, fieldName + ".enabled")); return true; + } else if (fieldName.equals(STORE_ARRAY_SOURCE_PARAM)) { + builder.trackArraySource(XContentMapValues.nodeBooleanValue(fieldNode, fieldName + ".track_array_source")); + return true; } else if (fieldName.equals("properties")) { if (fieldNode instanceof Collection && ((Collection) fieldNode).isEmpty()) { // nothing to do here, empty (to support "properties: []" case) @@ -369,6 +381,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate protected final Explicit enabled; protected 
final Explicit subobjects; + protected final Explicit trackArraySource; protected final Dynamic dynamic; protected final Map mappers; @@ -378,6 +391,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate String fullPath, Explicit enabled, Explicit subobjects, + Explicit trackArraySource, Dynamic dynamic, Map mappers ) { @@ -387,6 +401,7 @@ private static void validateFieldName(String fieldName, IndexVersion indexCreate this.fullPath = internFieldName(fullPath); this.enabled = enabled; this.subobjects = subobjects; + this.trackArraySource = trackArraySource; this.dynamic = dynamic; if (mappers == null) { this.mappers = Map.of(); @@ -412,7 +427,7 @@ public ObjectMapper.Builder newBuilder(IndexVersion indexVersionCreated) { * This is typically used in the context of a mapper merge when there's not enough budget to add the entire object. */ ObjectMapper withoutMappers() { - return new ObjectMapper(simpleName(), fullPath, enabled, subobjects, dynamic, Map.of()); + return new ObjectMapper(simpleName(), fullPath, enabled, subobjects, trackArraySource, dynamic, Map.of()); } @Override @@ -454,6 +469,10 @@ public final boolean subobjects() { return subobjects.value(); } + public final boolean trackArraySource() { + return trackArraySource.value(); + } + @Override public void validate(MappingLookup mappers) { for (Mapper mapper : this.mappers.values()) { @@ -480,6 +499,7 @@ public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContex fullPath, mergeResult.enabled, mergeResult.subObjects, + mergeResult.trackArraySource, mergeResult.dynamic, mergeResult.mappers ); @@ -488,6 +508,7 @@ public ObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeContex protected record MergeResult( Explicit enabled, Explicit subObjects, + Explicit trackArraySource, ObjectMapper.Dynamic dynamic, Map mappers ) { @@ -519,11 +540,26 @@ static MergeResult build(ObjectMapper existing, ObjectMapper mergeWithObject, Ma } else { subObjects = existing.subobjects; } + final Explicit trackArraySource; + if (mergeWithObject.trackArraySource.explicit()) { + if (reason == MergeReason.INDEX_TEMPLATE) { + trackArraySource = mergeWithObject.trackArraySource; + } else if (existing.trackArraySource != mergeWithObject.trackArraySource) { + throw new MapperException( + "the [track_array_source] parameter can't be updated for the object mapping [" + existing.name() + "]" + ); + } else { + trackArraySource = existing.trackArraySource; + } + } else { + trackArraySource = existing.trackArraySource; + } MapperMergeContext objectMergeContext = existing.createChildContext(parentMergeContext, existing.simpleName()); Map mergedMappers = buildMergedMappers(existing, mergeWithObject, objectMergeContext, subObjects.value()); return new MergeResult( enabled, subObjects, + trackArraySource, mergeWithObject.dynamic != null ? 
mergeWithObject.dynamic : existing.dynamic, mergedMappers ); @@ -680,6 +716,9 @@ void toXContent(XContentBuilder builder, Params params, ToXContent custom) throw if (subobjects != Defaults.SUBOBJECTS) { builder.field("subobjects", subobjects.value()); } + if (trackArraySource != Defaults.TRACK_ARRAY_SOURCE) { + builder.field(STORE_ARRAY_SOURCE_PARAM, trackArraySource.value()); + } if (custom != null) { custom.toXContent(builder, params); } @@ -712,19 +751,17 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep } - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader(Stream extra) { - return new SyntheticSourceFieldLoader( - Stream.concat(extra, mappers.values().stream()) - .sorted(Comparator.comparing(Mapper::name)) - .map(Mapper::syntheticFieldLoader) - .filter(l -> l != null) - .toList() - ); + public SourceLoader.SyntheticFieldLoader syntheticFieldLoader(Stream mappers) { + var fields = mappers.sorted(Comparator.comparing(Mapper::name)) + .map(Mapper::syntheticFieldLoader) + .filter(l -> l != SourceLoader.SyntheticFieldLoader.NOTHING) + .toList(); + return new SyntheticSourceFieldLoader(fields); } @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return syntheticFieldLoader(Stream.empty()); + return syntheticFieldLoader(mappers.values().stream()); } private class SyntheticSourceFieldLoader implements SourceLoader.SyntheticFieldLoader { @@ -830,6 +867,11 @@ public boolean setIgnoredValues(Map timeSeriesDimensionSubFields ) { // Subobjects are not currently supported. - super(name, fullPath, enabled, Explicit.IMPLICIT_FALSE, dynamic, mappers); + super(name, fullPath, enabled, Explicit.IMPLICIT_FALSE, Explicit.IMPLICIT_FALSE, dynamic, mappers); this.timeSeriesDimensionSubFields = timeSeriesDimensionSubFields; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index 9e0680e6e6e6a..c19809760ec43 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -117,6 +117,7 @@ public RootObjectMapper build(MapperBuilderContext context) { name(), enabled, subobjects, + trackArraySource, dynamic, mappers, new HashMap<>(runtimeFields), @@ -262,6 +263,7 @@ private static boolean isConflictingObject(Mapper mapper, String[] parts) { String name, Explicit enabled, Explicit subobjects, + Explicit trackArraySource, Dynamic dynamic, Map mappers, Map runtimeFields, @@ -270,7 +272,7 @@ private static boolean isConflictingObject(Mapper mapper, String[] parts) { Explicit dateDetection, Explicit numericDetection ) { - super(name, name, enabled, subobjects, dynamic, mappers); + super(name, name, enabled, subobjects, trackArraySource, dynamic, mappers); this.runtimeFields = runtimeFields; this.dynamicTemplates = dynamicTemplates; this.dynamicDateTimeFormatters = dynamicDateTimeFormatters; @@ -292,6 +294,7 @@ RootObjectMapper withoutMappers() { simpleName(), enabled, subobjects, + trackArraySource, dynamic, Map.of(), Map.of(), @@ -407,6 +410,7 @@ public RootObjectMapper merge(Mapper mergeWith, MapperMergeContext parentMergeCo simpleName(), mergeResult.enabled(), mergeResult.subObjects(), + mergeResult.trackArraySource(), mergeResult.dynamic(), mergeResult.mappers(), Map.copyOf(runtimeFields), diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java 
b/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java index c3ebe079e886e..96ba151472a03 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SortedNumericDocValuesSyntheticFieldLoader.java @@ -232,4 +232,9 @@ public static SortedNumericDocValues docValuesOrNull(LeafReader reader, String f } return null; } + + @Override + public String fieldName() { + return name; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java index 37b6fe72c3089..335e551365931 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SortedSetDocValuesSyntheticFieldLoader.java @@ -69,6 +69,11 @@ public SortedSetDocValuesSyntheticFieldLoader( : IgnoreMalformedStoredValues.empty(); } + @Override + public String fieldName() { + return name; + } + @Override public Stream> storedFieldLoaders() { if (storedValuesName == null) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java index d8879338bea1e..dea3494f408d9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java @@ -215,6 +215,11 @@ public boolean hasValue() { @Override public void write(XContentBuilder b) {} + + @Override + public String fieldName() { + return ""; + } }; /** @@ -242,10 +247,20 @@ public void write(XContentBuilder b) {} */ void write(XContentBuilder b) throws IOException; + /** + * Allows for identifying and tracking additional field values to include in the field source. + * @param objectsWithIgnoredFields maps object names to lists of fields they contain with special source handling + * @return true if any matching fields are identified + */ default boolean setIgnoredValues(Map> objectsWithIgnoredFields) { return false; } + /** + * Returns the canonical field name for this loader. + */ + String fieldName(); + /** * Sync for stored field values. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java index 6ae7c5f20233e..b26aed11233f9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/StringStoredFieldFieldLoader.java @@ -89,4 +89,9 @@ public final void write(XContentBuilder b) throws IOException { public final DocValuesLoader docValuesLoader(LeafReader reader, int[] docIdsInLeaf) throws IOException { return null; } + + @Override + public String fieldName() { + return name; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 5159a76206ef6..9ecd68ec27803 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -1671,6 +1671,11 @@ public void write(XContentBuilder b) throws IOException { } b.endArray(); } + + @Override + public String fieldName() { + return name(); + } } private class DocValuesSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { @@ -1721,5 +1726,10 @@ public void write(XContentBuilder b) throws IOException { } b.endArray(); } + + @Override + public String fieldName() { + return name(); + } } } diff --git a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java index 44e86e056ef3b..7ab682d3143e7 100644 --- a/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/CompletionPersistentTaskAction.java @@ -53,7 +53,9 @@ public static class Request extends MasterNodeRequest { private String localAbortReason; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); @@ -64,6 +66,7 @@ public Request(StreamInput in) throws IOException { } public Request(String taskId, long allocationId, Exception exception, String localAbortReason) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.taskId = taskId; this.exception = exception; this.allocationId = allocationId; diff --git a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java index 1fbdd03dcc268..26cf0658f60b9 100644 --- a/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/RemovePersistentTaskAction.java @@ -41,7 +41,9 @@ public static class Request extends MasterNodeRequest { private String taskId; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); @@ -49,6 +51,7 @@ public Request(StreamInput in) throws IOException { } public Request(String taskId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.taskId = taskId; } diff --git a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java index 
299891c64711a..ce0e46e7b0425 100644 --- a/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/StartPersistentTaskAction.java @@ -51,7 +51,9 @@ public static class Request extends MasterNodeRequest { private PersistentTaskParams params; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); @@ -61,6 +63,7 @@ public Request(StreamInput in) throws IOException { } public Request(String taskId, String taskName, PersistentTaskParams params) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.taskId = taskId; this.taskName = taskName; this.params = params; diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index dcf86f85eb709..6ecefa1bbf847 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -45,7 +45,9 @@ public static class Request extends MasterNodeRequest { private long allocationId = -1L; private PersistentTaskState state; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); @@ -55,6 +57,7 @@ public Request(StreamInput in) throws IOException { } public Request(String taskId, long allocationId, PersistentTaskState state) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.taskId = taskId; this.allocationId = allocationId; this.state = state; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 8ccc100e31501..dac5ab97f2962 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -984,6 +984,11 @@ class SnapshotsDeletion { // NB only counts stale root blobs today, not shard-level blobs private final AtomicLong bytesDeleted = new AtomicLong(); + /** + * Tracks the shard-level blobs which can be deleted once all the metadata updates have completed. + */ + private final ShardBlobsToDelete shardBlobsToDelete = new ShardBlobsToDelete(); + SnapshotsDeletion( Collection snapshotIds, long originalRepositoryDataGeneration, @@ -1001,36 +1006,6 @@ class SnapshotsDeletion { this.originalRepositoryData = originalRepositoryData; } - /** - * The result of removing a snapshot from a shard folder in the repository. - * - * @param indexId Index that the snapshot was removed from - * @param shardId Shard id that the snapshot was removed from - * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more - * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation - */ - private record ShardSnapshotMetaDeleteResult( - IndexId indexId, - int shardId, - ShardGeneration newGeneration, - Collection blobsToDelete - ) {} - - /** - *

- * Shard-level results, see {@link ShardSnapshotMetaDeleteResult}. - *

- *

- * Writes to this list are all synchronized (via {@link #addShardDeleteResult}), and happen-before it is read so the reads need - * no further synchronization - *

- */ - private final List shardDeleteResults = new ArrayList<>(); - - private synchronized void addShardDeleteResult(ShardSnapshotMetaDeleteResult shardDeleteResult) { - shardDeleteResults.add(shardDeleteResult); - } - // --------------------------------------------------------------------------------------------------------------------------------- // The overall flow of execution @@ -1058,11 +1033,10 @@ private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { // referenced by the new RepositoryData and will be cleaned up by a subsequent delete. // // TODO should we even write the new RepositoryData unless all shard paths have been successfully updated? See #100569. - final ShardGenerations.Builder builder = ShardGenerations.builder(); - for (ShardSnapshotMetaDeleteResult newGen : shardDeleteResults) { - builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration); - } - updateRepositoryData(originalRepositoryData.removeSnapshots(snapshotIds, builder.build()), l); + updateRepositoryData( + originalRepositoryData.removeSnapshots(snapshotIds, shardBlobsToDelete.getUpdatedShardGenerations()), + l + ); }) .addListener( @@ -1073,7 +1047,7 @@ private void runWithUniqueShardMetadataNaming(SnapshotDeleteListener listener) { // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion try (var refs = new RefCountingRunnable(listener::onDone)) { cleanupUnlinkedRootAndIndicesBlobs(newRepositoryData, refs.acquireListener()); - cleanupUnlinkedShardLevelBlobs(shardDeleteResults, refs.acquireListener()); + cleanupUnlinkedShardLevelBlobs(refs.acquireListener()); } }, listener::onFailure @@ -1098,7 +1072,7 @@ private void runWithLegacyNumericShardMetadataNaming(SnapshotDeleteListener list ActionRunnable.wrap( refs.acquireListener(), l0 -> writeUpdatedShardMetadataAndComputeDeletes( - l0.delegateFailure((l, ignored) -> cleanupUnlinkedShardLevelBlobs(shardDeleteResults, l)) + l0.delegateFailure((l, ignored) -> cleanupUnlinkedShardLevelBlobs(l)) ) ) ); @@ -1264,9 +1238,7 @@ protected void doRun() throws Exception { newGen = tuple.v2() + 1; blobStoreIndexShardSnapshots = tuple.v1(); } - addShardDeleteResult( - deleteFromShardSnapshotMeta(blobStoreIndexShardSnapshots.withRetainedSnapshots(survivingSnapshots), newGen) - ); + deleteFromShardSnapshotMeta(blobStoreIndexShardSnapshots.withRetainedSnapshots(survivingSnapshots), newGen); } /** @@ -1275,14 +1247,11 @@ protected void doRun() throws Exception { * @param indexGeneration generation to write the new shard level level metadata to. 
If negative a uuid id shard generation * should be used */ - private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( - BlobStoreIndexShardSnapshots updatedSnapshots, - long indexGeneration - ) { + private void deleteFromShardSnapshotMeta(BlobStoreIndexShardSnapshots updatedSnapshots, long indexGeneration) { ShardGeneration writtenGeneration = null; try { if (updatedSnapshots.snapshots().isEmpty()) { - return new ShardSnapshotMetaDeleteResult( + shardBlobsToDelete.addShardDeleteResult( indexId, shardId, ShardGenerations.DELETED_SHARD_GEN, @@ -1304,7 +1273,7 @@ private ShardSnapshotMetaDeleteResult deleteFromShardSnapshotMeta( final Set survivingSnapshotUUIDs = survivingSnapshots.stream() .map(SnapshotId::getUUID) .collect(Collectors.toSet()); - return new ShardSnapshotMetaDeleteResult( + shardBlobsToDelete.addShardDeleteResult( indexId, shardId, writtenGeneration, @@ -1372,11 +1341,8 @@ private void updateRepositoryData(RepositoryData newRepositoryData, ActionListen // --------------------------------------------------------------------------------------------------------------------------------- // Cleaning up dangling blobs - private void cleanupUnlinkedShardLevelBlobs( - Collection shardDeleteResults, - ActionListener listener - ) { - final Iterator filesToDelete = resolveFilesToDelete(shardDeleteResults); + private void cleanupUnlinkedShardLevelBlobs(ActionListener listener) { + final Iterator filesToDelete = resolveFilesToDelete(); if (filesToDelete.hasNext() == false) { listener.onResponse(null); return; @@ -1392,26 +1358,25 @@ private void cleanupUnlinkedShardLevelBlobs( })); } - private Iterator resolveFilesToDelete(Collection deleteResults) { + private Iterator resolveFilesToDelete() { // Somewhat surprisingly we can construct the String representations of the blobs to delete with BlobPath#buildAsString even // on Windows, because the JDK translates / to \ automatically (and all other blob stores use / as the path separator anyway) final String basePath = basePath().buildAsString(); final int basePathLen = basePath.length(); - return Stream.concat( - // Unreferenced shard-level blobs - deleteResults.stream().flatMap(shardResult -> { - final String shardPath = shardPath(shardResult.indexId, shardResult.shardId).buildAsString(); - return shardResult.blobsToDelete.stream().map(blob -> shardPath + blob); - }), - // Unreferenced index metadata - originalRepositoryData.indexMetaDataToRemoveAfterRemovingSnapshots(snapshotIds).entrySet().stream().flatMap(entry -> { - final String indexContainerPath = indexPath(entry.getKey()).buildAsString(); - return entry.getValue().stream().map(id -> indexContainerPath + INDEX_METADATA_FORMAT.blobName(id)); - }) - ).map(absolutePath -> { + return Iterators.map(Iterators.concat(shardBlobsToDelete.getBlobPaths(), getUnreferencedIndexMetadata()), absolutePath -> { assert absolutePath.startsWith(basePath); return absolutePath.substring(basePathLen); - }).iterator(); + }); + } + + private Iterator getUnreferencedIndexMetadata() { + return Iterators.flatMap( + originalRepositoryData.indexMetaDataToRemoveAfterRemovingSnapshots(snapshotIds).entrySet().iterator(), + entry -> { + final String indexContainerPath = indexPath(entry.getKey()).buildAsString(); + return Iterators.map(entry.getValue().iterator(), id -> indexContainerPath + INDEX_METADATA_FORMAT.blobName(id)); + } + ); } /** @@ -1545,6 +1510,62 @@ private void logStaleRootLevelBlobs( } } + /** + * Tracks the shard-level blobs which can be deleted once all the metadata updates have completed 
during a snapshot deletion.
+ */
+ class ShardBlobsToDelete {
+
+     /**
+      * The result of removing a snapshot from a shard folder in the repository.
+      *
+      * @param indexId       Index that the snapshot was removed from
+      * @param shardId       Shard id that the snapshot was removed from
+      * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more
+      * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation
+      */
+     private record ShardSnapshotMetaDeleteResult(
+         IndexId indexId,
+         int shardId,
+         ShardGeneration newGeneration,
+         Collection<String> blobsToDelete
+     ) {}
+
+     /**
+      * <p>
+      * Shard-level results, see {@link ShardSnapshotMetaDeleteResult}.
+      * </p>
+      * <p>
+      * Writes to this list are all synchronized (via {@link #addShardDeleteResult}), and happen-before it is read so the reads need
+      * no further synchronization
+      * </p>
+ */ + private final List shardDeleteResults = new ArrayList<>(); + + synchronized void addShardDeleteResult( + IndexId indexId, + int shardId, + ShardGeneration newGeneration, + Collection blobsToDelete + ) { + shardDeleteResults.add(new ShardSnapshotMetaDeleteResult(indexId, shardId, newGeneration, blobsToDelete)); + } + + public ShardGenerations getUpdatedShardGenerations() { + final var builder = ShardGenerations.builder(); + for (var shardResult : shardDeleteResults) { + builder.put(shardResult.indexId, shardResult.shardId, shardResult.newGeneration); + } + return builder.build(); + } + + public Iterator getBlobPaths() { + return Iterators.flatMap(shardDeleteResults.iterator(), shardResult -> { + final var shardPath = shardPath(shardResult.indexId, shardResult.shardId).buildAsString(); + return Iterators.map(shardResult.blobsToDelete.iterator(), blob -> shardPath + blob); + }); + } + } + @Override public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotContext) { final long repositoryStateId = finalizeSnapshotContext.repositoryStateId(); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 4b5c647da0c9a..0c54e8ff89589 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -167,23 +167,35 @@ protected SearchHit nextDoc(int doc) throws IOException { leafSourceLoader, leafIdLoader ); - sourceProvider.source = hit.source(); - fieldLookupProvider.setPreloadedStoredFieldValues(hit.hit().getId(), hit.loadedFields()); - for (FetchSubPhaseProcessor processor : processors) { - processor.process(hit); + boolean success = false; + try { + sourceProvider.source = hit.source(); + fieldLookupProvider.setPreloadedStoredFieldValues(hit.hit().getId(), hit.loadedFields()); + for (FetchSubPhaseProcessor processor : processors) { + processor.process(hit); + } + success = true; + return hit.hit(); + } finally { + if (success == false) { + hit.hit().decRef(); + } } - return hit.hit(); } }; SearchHit[] hits = docsIterator.iterate(context.shardTarget(), context.searcher().getIndexReader(), docIdsToLoad); if (context.isCancelled()) { + for (SearchHit hit : hits) { + // release all hits that would otherwise become owned and eventually released by SearchHits below + hit.decRef(); + } throw new TaskCancelledException("cancelled"); } TotalHits totalHits = context.getTotalHits(); - return SearchHits.unpooled(hits, totalHits, context.getMaxScore()); + return new SearchHits(hits, totalHits, context.getMaxScore()); } List getProcessors(SearchShardTarget target, FetchContext context, Profiler profiler) { @@ -257,12 +269,12 @@ private static HitContext prepareNonNestedHitContext( String id = idLoader.getId(subDocId); if (id == null) { - // TODO: can we use pooled buffers here as well? - SearchHit hit = SearchHit.unpooled(docId, null); + SearchHit hit = new SearchHit(docId); + // TODO: can we use real pooled buffers here as well? 
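+ // (editor's note, not part of the patch) the switch from SearchHit.unpooled(...) to new SearchHit(...) makes these
+ // hits ref-counted, which is why the loop in nextDoc(...) above now decRef()s a hit when processing fails and why
+ // a cancelled fetch releases every hit before throwing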
Source source = Source.lazy(lazyStoredSourceLoader(profiler, subReaderContext, subDocId)); return new HitContext(hit, subReaderContext, subDocId, Map.of(), source); } else { - SearchHit hit = SearchHit.unpooled(docId, id); + SearchHit hit = new SearchHit(docId, id); Source source; if (requiresSource) { Timer timer = profiler.startLoadingSource(); @@ -339,7 +351,7 @@ private static HitContext prepareNestedHitContext( assert nestedIdentity != null; Source nestedSource = nestedIdentity.extractSource(rootSource); - SearchHit hit = SearchHit.unpooled(topDocId, rootId, nestedIdentity); + SearchHit hit = new SearchHit(topDocId, rootId, nestedIdentity); return new HitContext(hit, subReaderContext, nestedInfo.doc(), childFieldLoader.storedFields(), nestedSource); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java index cc39113f2009f..81b3e7465feee 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java @@ -67,6 +67,7 @@ public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader inde setNextReader(ctx, docsInLeaf); } currentDoc = docs[i].docId; + assert searchHits[docs[i].index] == null; searchHits[docs[i].index] = nextDoc(docs[i].docId); } } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index 4c3d3948ff889..4170f7e2f8b4b 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -61,8 +61,13 @@ public FetchSearchResult fetchResult() { public void shardResult(SearchHits hits, ProfileResult profileResult) { assert assertNoSearchTarget(hits); + assert hasReferences(); + var existing = this.hits; + if (existing != null) { + existing.decRef(); + } this.hits = hits; - hits.incRef(); + hits.mustIncRef(); assert this.profileResult == null; this.profileResult = profileResult; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java index ccb54801472a6..a4ba982e1dd73 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java @@ -104,6 +104,7 @@ private void hitExecute(Map innerHi } } var h = fetchResult.hits(); + assert hit.isPooled() || h.isPooled() == false; results.put(entry.getKey(), h); h.mustIncRef(); } diff --git a/server/src/main/java/org/elasticsearch/search/lookup/EmptySource.java b/server/src/main/java/org/elasticsearch/search/lookup/EmptySource.java new file mode 100644 index 0000000000000..bfaf9620ade74 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/lookup/EmptySource.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.lookup; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.EnumMap; +import java.util.Map; + +final class EmptySource implements Source { + + private static final EnumMap values = new EnumMap<>(XContentType.class); + + static { + for (XContentType value : XContentType.values()) { + values.put(value, new EmptySource(value)); + } + } + + static EmptySource forType(XContentType type) { + return values.get(type); + } + + private final XContentType type; + + private final BytesReference sourceRef; + + private EmptySource(XContentType type) { + this.type = type; + try { + sourceRef = new BytesArray( + BytesReference.toBytes(BytesReference.bytes(new XContentBuilder(type.xContent(), new BytesStreamOutput()).value(Map.of()))) + ); + } catch (IOException e) { + throw new AssertionError("impossible", e); + } + } + + @Override + public XContentType sourceContentType() { + return type; + } + + @Override + public Map source() { + return Map.of(); + } + + @Override + public BytesReference internalSourceRef() { + return sourceRef; + } + + @Override + public Source filter(SourceFilter sourceFilter) { + return this; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/lookup/Source.java b/server/src/main/java/org/elasticsearch/search/lookup/Source.java index 851044d1efcec..7098cce548c53 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/Source.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/Source.java @@ -74,7 +74,7 @@ default Object extractValue(String path, @Nullable Object nullValue) { * An empty Source, represented as an empty map */ static Source empty(XContentType xContentType) { - return Source.fromMap(Map.of(), xContentType == null ? XContentType.JSON : xContentType); + return EmptySource.forType(xContentType == null ? XContentType.JSON : xContentType); } /** @@ -148,6 +148,9 @@ public Source filter(SourceFilter sourceFilter) { */ static Source fromMap(Map map, XContentType xContentType) { Map sourceMap = map == null ? 
Map.of() : map; + if (sourceMap.isEmpty()) { + return empty(xContentType); + } return new Source() { @Override public XContentType sourceContentType() { diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 5cabe22389529..fa6ea9c6519d8 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -704,7 +704,10 @@ static DataStream updateDataStream(DataStream dataStream, Metadata.Builder metad .stream() .map(i -> metadata.get(renameIndex(i.getName(), request, true)).getIndex()) .toList(); - return dataStream.copy().setName(dataStreamName).setIndices(updatedIndices).build(); + return dataStream.copy() + .setName(dataStreamName) + .setBackingIndices(dataStream.getBackingIndices().copy().setIndices(updatedIndices).build()) + .build(); } public static RestoreInProgress updateRestoreStateWithDeletedIndices(RestoreInProgress oldRestore, Set deletedIndices) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index 6098ea777d38a..7ccdb5da6d736 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.admin.cluster.reroute; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; @@ -22,6 +21,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.admin.cluster.RestClusterRerouteAction; import org.elasticsearch.test.ESTestCase; @@ -202,7 +202,7 @@ private RestRequest toRestRequest(ClusterRerouteRequest original) throws IOExcep if (original.isRetryFailed() || randomBoolean()) { params.put("retry_failed", Boolean.toString(original.isRetryFailed())); } - if (false == original.masterNodeTimeout().equals(MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT) || randomBoolean()) { + if (false == original.masterNodeTimeout().equals(TimeValue.THIRTY_SECONDS) || randomBoolean()) { params.put(REST_MASTER_TIMEOUT_PARAM, original.masterNodeTimeout().toString()); } if (original.getCommands() != null) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index 149752578e1ea..c2edf9729b8b8 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -682,7 +682,9 @@ public void 
testRolloverClusterStateForDataStreamFailureStore() throws Exception Metadata.Builder builder = Metadata.builder(); builder.put("template", template); dataStream.getIndices().forEach(index -> builder.put(DataStreamTestHelper.getIndexMetadataBuilderForIndex(index))); - dataStream.getFailureIndices().forEach(index -> builder.put(DataStreamTestHelper.getIndexMetadataBuilderForIndex(index))); + dataStream.getFailureIndices() + .getIndices() + .forEach(index -> builder.put(DataStreamTestHelper.getIndexMetadataBuilderForIndex(index))); builder.put(dataStream); final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metadata(builder).build(); final TestTelemetryPlugin telemetryPlugin = new TestTelemetryPlugin(); @@ -723,15 +725,18 @@ public void testRolloverClusterStateForDataStreamFailureStore() throws Exception assertEquals(sourceIndexName, rolloverResult.sourceIndexName()); assertEquals(newIndexName, rolloverResult.rolloverIndexName()); Metadata rolloverMetadata = rolloverResult.clusterState().metadata(); - assertEquals(dataStream.getIndices().size() + dataStream.getFailureIndices().size() + 1, rolloverMetadata.indices().size()); + assertEquals( + dataStream.getIndices().size() + dataStream.getFailureIndices().getIndices().size() + 1, + rolloverMetadata.indices().size() + ); IndexMetadata rolloverIndexMetadata = rolloverMetadata.index(newIndexName); var ds = (DataStream) rolloverMetadata.getIndicesLookup().get(dataStream.getName()); assertThat(ds.getType(), equalTo(IndexAbstraction.Type.DATA_STREAM)); assertThat(ds.getIndices(), hasSize(dataStream.getIndices().size())); - assertThat(ds.getFailureIndices(), hasSize(dataStream.getFailureIndices().size() + 1)); - assertThat(ds.getFailureIndices(), hasItem(rolloverMetadata.index(sourceIndexName).getIndex())); - assertThat(ds.getFailureIndices(), hasItem(rolloverIndexMetadata.getIndex())); + assertThat(ds.getFailureIndices().getIndices(), hasSize(dataStream.getFailureIndices().getIndices().size() + 1)); + assertThat(ds.getFailureIndices().getIndices(), hasItem(rolloverMetadata.index(sourceIndexName).getIndex())); + assertThat(ds.getFailureIndices().getIndices(), hasItem(rolloverIndexMetadata.getIndex())); assertThat(ds.getFailureStoreWriteIndex(), equalTo(rolloverIndexMetadata.getIndex())); RolloverInfo info = rolloverMetadata.index(sourceIndexName).getRolloverInfos().get(dataStream.getName()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index 42c4dec3e219b..9dbabe2c41893 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -440,12 +440,13 @@ public void testLazyRollover() throws Exception { doAnswer(invocation -> { Object[] args = invocation.getArguments(); - assert args.length == 5; + assert args.length == 6; @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) args[4]; + ActionListener listener = (ActionListener) args[5]; listener.onResponse(AcknowledgedResponse.TRUE); return null; - }).when(mockMetadataDataStreamService).setRolloverOnWrite(eq(dataStream.getName()), eq(true), any(), any(), anyActionListener()); + }).when(mockMetadataDataStreamService) + .setRolloverOnWrite(eq(dataStream.getName()), eq(true), eq(false), any(), any(), anyActionListener()); 
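+ // (editor's note, not part of the patch) setRolloverOnWrite gained a parameter, so the stub now matches six
+ // arguments and the ActionListener sits at args[5]; the new eq(false) presumably pins the added failure-store
+ // flag (the parameter's meaning is inferred, not stated in this diff)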
final TransportRolloverAction transportRolloverAction = new TransportRolloverAction( mock(TransportService.class), diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java index 9803082bbd88a..8bc2a978af0cf 100644 --- a/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/datastreams/autosharding/DataStreamAutoShardingServiceTests.java @@ -768,10 +768,10 @@ private DataStream createDataStream( builder.put(indexMetadata, false); backingIndices.add(indexMetadata.getIndex()); } - return DataStream.builder(dataStreamName, backingIndices) - .setGeneration(backingIndicesCount) - .setAutoShardingEvent(autoShardingEvent) - .build(); + return DataStream.builder( + dataStreamName, + DataStream.DataStreamIndices.backingIndicesBuilder(backingIndices).setAutoShardingEvent(autoShardingEvent).build() + ).setGeneration(backingIndicesCount).build(); } private IndexMetadata createIndexMetadata( diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 94e0ce1ccaf17..6d24f8d2fe9e0 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -153,7 +153,9 @@ public static class Request extends MasterNodeRequest implements Indice private String[] indices = Strings.EMPTY_ARRAY; private final RefCounted refCounted = AbstractRefCounted.of(() -> {}); - Request() {} + Request() { + super(TimeValue.THIRTY_SECONDS); + } Request(StreamInput in) throws IOException { super(in); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index d42b6096b6e32..5a5d5f46de413 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -73,7 +73,7 @@ protected DataStream doParseInstance(XContentParser parser) throws IOException { @Override protected Writeable.Reader instanceReader() { - return DataStream::new; + return DataStream::read; } @Override @@ -94,10 +94,12 @@ protected DataStream mutateInstance(DataStream instance) { var indexMode = instance.getIndexMode(); var lifecycle = instance.getLifecycle(); var failureStore = instance.isFailureStoreEnabled(); - var failureIndices = instance.getFailureIndices(); + var failureIndices = instance.getFailureIndices().getIndices(); var rolloverOnWrite = instance.rolloverOnWrite(); var autoShardingEvent = instance.getAutoShardingEvent(); - switch (between(0, 12)) { + var failureRolloverOnWrite = instance.getFailureIndices().isRolloverOnWrite(); + var failureAutoShardingEvent = instance.getBackingIndices().getAutoShardingEvent(); + switch (between(0, 14)) { case 0 -> name = randomAlphaOfLength(10); case 1 -> indices = randomNonEmptyIndexInstances(); case 2 -> generation = instance.getGeneration() + randomIntBetween(1, 10); @@ -114,6 +116,7 @@ protected DataStream mutateInstance(DataStream instance) { isReplicated = isReplicated == false; // Replicated data streams cannot be 
marked for lazy rollover. rolloverOnWrite = isReplicated == false && rolloverOnWrite; + failureRolloverOnWrite = isReplicated == false && failureRolloverOnWrite; } case 6 -> { if (isSystem == false) { @@ -139,7 +142,27 @@ protected DataStream mutateInstance(DataStream instance) { isReplicated = rolloverOnWrite == false && isReplicated; } case 12 -> { - autoShardingEvent = randomBoolean() && autoShardingEvent != null + if (randomBoolean() || autoShardingEvent == null) { + // If we're mutating the auto sharding event of the failure store, we need to ensure there's at least one failure index. + if (failureIndices.isEmpty()) { + failureIndices = DataStreamTestHelper.randomNonEmptyIndexInstances(); + failureStore = true; + } + autoShardingEvent = new DataStreamAutoShardingEvent( + failureIndices.get(failureIndices.size() - 1).getName(), + randomIntBetween(1, 10), + randomMillisUpToYear9999() + ); + } else { + autoShardingEvent = null; + } + } + case 13 -> { + failureRolloverOnWrite = failureRolloverOnWrite == false; + isReplicated = failureRolloverOnWrite == false && isReplicated; + } + case 14 -> { + failureAutoShardingEvent = randomBoolean() && failureAutoShardingEvent != null ? null : new DataStreamAutoShardingEvent( indices.get(indices.size() - 1).getName(), @@ -151,25 +174,29 @@ protected DataStream mutateInstance(DataStream instance) { return new DataStream( name, - indices, generation, metadata, isHidden, isReplicated, isSystem, + System::currentTimeMillis, allowsCustomRouting, indexMode, lifecycle, failureStore, - failureIndices, - rolloverOnWrite, - autoShardingEvent + new DataStream.DataStreamIndices(DataStream.BACKING_INDEX_PREFIX, indices, rolloverOnWrite, autoShardingEvent), + new DataStream.DataStreamIndices( + DataStream.BACKING_INDEX_PREFIX, + failureIndices, + failureRolloverOnWrite, + failureAutoShardingEvent + ) ); } public void testRollover() { DataStream ds = DataStreamTestHelper.randomInstance().promoteDataStream(); - Tuple newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); + Tuple newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA, ds.getBackingIndices()); final DataStream rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false, null); assertThat(rolledDs.getName(), equalTo(ds.getName())); assertThat(rolledDs.getGeneration(), equalTo(ds.getGeneration() + 1)); @@ -196,7 +223,7 @@ public void testRolloverWithConflictingBackingIndexName() { builder.put(im, false); } - final Tuple newCoordinates = ds.nextWriteIndexAndGeneration(builder.build()); + final Tuple newCoordinates = ds.nextWriteIndexAndGeneration(builder.build(), ds.getBackingIndices()); final DataStream rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false, null); assertThat(rolledDs.getName(), equalTo(ds.getName())); assertThat(rolledDs.getGeneration(), equalTo(ds.getGeneration() + numConflictingIndices + 1)); @@ -212,7 +239,7 @@ public void testRolloverUpgradeToTsdbDataStream() { .setReplicated(false) .setIndexMode(randomBoolean() ? 
IndexMode.STANDARD : null) .build(); - var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); + var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA, ds.getBackingIndices()); var rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), true, null); assertThat(rolledDs.getName(), equalTo(ds.getName())); @@ -225,7 +252,7 @@ public void testRolloverUpgradeToTsdbDataStream() { public void testRolloverDowngradeToRegularDataStream() { DataStream ds = DataStreamTestHelper.randomInstance().copy().setReplicated(false).setIndexMode(IndexMode.TIME_SERIES).build(); - var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA); + var newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA, ds.getBackingIndices()); var rolledDs = ds.rollover(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2(), false, null); assertThat(rolledDs.getName(), equalTo(ds.getName())); @@ -238,18 +265,18 @@ public void testRolloverDowngradeToRegularDataStream() { public void testRolloverFailureStore() { DataStream ds = DataStreamTestHelper.randomInstance(true).promoteDataStream(); - Tuple newCoordinates = ds.nextFailureStoreWriteIndexAndGeneration(Metadata.EMPTY_METADATA); + Tuple newCoordinates = ds.nextWriteIndexAndGeneration(Metadata.EMPTY_METADATA, ds.getFailureIndices()); final DataStream rolledDs = ds.rolloverFailureStore(new Index(newCoordinates.v1(), UUIDs.randomBase64UUID()), newCoordinates.v2()); assertThat(rolledDs.getName(), equalTo(ds.getName())); assertThat(rolledDs.getGeneration(), equalTo(ds.getGeneration() + 1)); assertThat(rolledDs.getIndices().size(), equalTo(ds.getIndices().size())); // Ensure that the rolloverOnWrite flag hasn't changed when rolling over a failure store. 
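// (note: only the failure store gains a write index in this rollover; the assertions below also verify the backing indices are left untouched)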
assertThat(rolledDs.rolloverOnWrite(), equalTo(ds.rolloverOnWrite())); - assertThat(rolledDs.getFailureIndices().size(), equalTo(ds.getFailureIndices().size() + 1)); + assertThat(rolledDs.getFailureIndices().getIndices().size(), equalTo(ds.getFailureIndices().getIndices().size() + 1)); assertTrue(rolledDs.getIndices().containsAll(ds.getIndices())); assertTrue(rolledDs.getIndices().contains(rolledDs.getWriteIndex())); - assertTrue(rolledDs.getFailureIndices().containsAll(ds.getFailureIndices())); - assertTrue(rolledDs.getFailureIndices().contains(rolledDs.getFailureStoreWriteIndex())); + assertTrue(rolledDs.getFailureIndices().getIndices().containsAll(ds.getFailureIndices().getIndices())); + assertTrue(rolledDs.getFailureIndices().getIndices().contains(rolledDs.getFailureStoreWriteIndex())); } public void testRemoveBackingIndex() { @@ -298,15 +325,18 @@ public void testRemoveBackingWriteIndex() { public void testRemoveFailureStoreIndex() { DataStream original = createRandomDataStream(); - int indexToRemove = randomIntBetween(1, original.getFailureIndices().size() - 1); + int indexToRemove = randomIntBetween(1, original.getFailureIndices().getIndices().size() - 1); - DataStream updated = original.removeFailureStoreIndex(original.getFailureIndices().get(indexToRemove - 1)); + DataStream updated = original.removeFailureStoreIndex(original.getFailureIndices().getIndices().get(indexToRemove - 1)); assertThat(updated.getName(), equalTo(original.getName())); assertThat(updated.getGeneration(), equalTo(original.getGeneration() + 1)); assertThat(updated.getIndices().size(), equalTo(original.getIndices().size())); - assertThat(updated.getFailureIndices().size(), equalTo(original.getFailureIndices().size() - 1)); - for (int k = 0; k < (original.getFailureIndices().size() - 1); k++) { - assertThat(updated.getFailureIndices().get(k), equalTo(original.getFailureIndices().get(k < (indexToRemove - 1) ? k : k + 1))); + assertThat(updated.getFailureIndices().getIndices().size(), equalTo(original.getFailureIndices().getIndices().size() - 1)); + for (int k = 0; k < (original.getFailureIndices().getIndices().size() - 1); k++) { + assertThat( + updated.getFailureIndices().getIndices().get(k), + equalTo(original.getFailureIndices().getIndices().get(k < (indexToRemove - 1) ? 
k : k + 1)) + ); } } @@ -326,7 +356,9 @@ public void testRemoveFailureStoreWriteIndex() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> original.removeFailureStoreIndex(original.getFailureIndices().get(original.getFailureIndices().size() - 1)) + () -> original.removeFailureStoreIndex( + original.getFailureIndices().getIndices().get(original.getFailureIndices().getIndices().size() - 1) + ) ); assertThat( e.getMessage(), @@ -334,7 +366,7 @@ public void testRemoveFailureStoreWriteIndex() { String.format( Locale.ROOT, "cannot remove backing index [%s] of data stream [%s] because it is the write index", - original.getFailureIndices().get(original.getFailureIndices().size() - 1).getName(), + original.getFailureIndices().getIndices().get(original.getFailureIndices().getIndices().size() - 1).getName(), original.getName() ) ) @@ -379,9 +411,9 @@ public void testAddBackingIndexThatIsPartOfAnotherDataStream() { builder.put(ds2); createMetadataForIndices(builder, ds1.getIndices()); - createMetadataForIndices(builder, ds1.getFailureIndices()); + createMetadataForIndices(builder, ds1.getFailureIndices().getIndices()); createMetadataForIndices(builder, ds2.getIndices()); - createMetadataForIndices(builder, ds2.getFailureIndices()); + createMetadataForIndices(builder, ds2.getFailureIndices().getIndices()); Index indexToAdd = randomFrom(ds2.getIndices().toArray(Index.EMPTY_ARRAY)); @@ -409,11 +441,11 @@ public void testAddBackingIndexThatIsPartOfDataStreamFailureStore() { builder.put(ds2); createMetadataForIndices(builder, ds1.getIndices()); - createMetadataForIndices(builder, ds1.getFailureIndices()); + createMetadataForIndices(builder, ds1.getFailureIndices().getIndices()); createMetadataForIndices(builder, ds2.getIndices()); - createMetadataForIndices(builder, ds2.getFailureIndices()); + createMetadataForIndices(builder, ds2.getFailureIndices().getIndices()); - Index indexToAdd = randomFrom(ds2.getFailureIndices().toArray(Index.EMPTY_ARRAY)); + Index indexToAdd = randomFrom(ds2.getFailureIndices().getIndices().toArray(Index.EMPTY_ARRAY)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> ds1.addBackingIndex(builder.build(), indexToAdd)); assertThat( @@ -498,7 +530,7 @@ public void testAddFailureStoreIndex() { builder.put(original); createMetadataForIndices(builder, original.getIndices()); - createMetadataForIndices(builder, original.getFailureIndices()); + createMetadataForIndices(builder, original.getFailureIndices().getIndices()); Index indexToAdd = new Index(randomAlphaOfLength(4), UUIDs.randomBase64UUID(random())); builder.put( @@ -514,11 +546,11 @@ public void testAddFailureStoreIndex() { assertThat(updated.getName(), equalTo(original.getName())); assertThat(updated.getGeneration(), equalTo(original.getGeneration() + 1)); assertThat(updated.getIndices().size(), equalTo(original.getIndices().size())); - assertThat(updated.getFailureIndices().size(), equalTo(original.getFailureIndices().size() + 1)); - for (int k = 1; k <= original.getFailureIndices().size(); k++) { - assertThat(updated.getFailureIndices().get(k), equalTo(original.getFailureIndices().get(k - 1))); + assertThat(updated.getFailureIndices().getIndices().size(), equalTo(original.getFailureIndices().getIndices().size() + 1)); + for (int k = 1; k <= original.getFailureIndices().getIndices().size(); k++) { + assertThat(updated.getFailureIndices().getIndices().get(k), equalTo(original.getFailureIndices().getIndices().get(k - 1))); } - 
assertThat(updated.getFailureIndices().get(0), equalTo(indexToAdd)); + assertThat(updated.getFailureIndices().getIndices().get(0), equalTo(indexToAdd)); } public void testAddFailureStoreIndexThatIsPartOfAnotherDataStream() { @@ -530,11 +562,11 @@ public void testAddFailureStoreIndexThatIsPartOfAnotherDataStream() { builder.put(ds2); createMetadataForIndices(builder, ds1.getIndices()); - createMetadataForIndices(builder, ds1.getFailureIndices()); + createMetadataForIndices(builder, ds1.getFailureIndices().getIndices()); createMetadataForIndices(builder, ds2.getIndices()); - createMetadataForIndices(builder, ds2.getFailureIndices()); + createMetadataForIndices(builder, ds2.getFailureIndices().getIndices()); - Index indexToAdd = randomFrom(ds2.getFailureIndices().toArray(Index.EMPTY_ARRAY)); + Index indexToAdd = randomFrom(ds2.getFailureIndices().getIndices().toArray(Index.EMPTY_ARRAY)); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -563,9 +595,9 @@ public void testAddFailureStoreIndexThatIsPartOfDataStreamBackingIndices() { builder.put(ds2); createMetadataForIndices(builder, ds1.getIndices()); - createMetadataForIndices(builder, ds1.getFailureIndices()); + createMetadataForIndices(builder, ds1.getFailureIndices().getIndices()); createMetadataForIndices(builder, ds2.getIndices()); - createMetadataForIndices(builder, ds2.getFailureIndices()); + createMetadataForIndices(builder, ds2.getFailureIndices().getIndices()); Index indexToAdd = randomFrom(ds2.getIndices().toArray(Index.EMPTY_ARRAY)); @@ -594,16 +626,16 @@ public void testAddExistingFailureStoreIndex() { builder.put(original); createMetadataForIndices(builder, original.getIndices()); - createMetadataForIndices(builder, original.getFailureIndices()); + createMetadataForIndices(builder, original.getFailureIndices().getIndices()); - Index indexToAdd = randomFrom(original.getFailureIndices().toArray(Index.EMPTY_ARRAY)); + Index indexToAdd = randomFrom(original.getFailureIndices().getIndices().toArray(Index.EMPTY_ARRAY)); DataStream updated = original.addFailureStoreIndex(builder.build(), indexToAdd); assertThat(updated.getName(), equalTo(original.getName())); assertThat(updated.getGeneration(), equalTo(original.getGeneration())); assertThat(updated.getIndices().size(), equalTo(original.getIndices().size())); - assertThat(updated.getFailureIndices().size(), equalTo(original.getFailureIndices().size())); - assertThat(updated.getFailureIndices(), equalTo(original.getFailureIndices())); + assertThat(updated.getFailureIndices().getIndices().size(), equalTo(original.getFailureIndices().getIndices().size())); + assertThat(updated.getFailureIndices().getIndices(), equalTo(original.getFailureIndices().getIndices())); } public void testAddFailureStoreIndexWithAliases() { @@ -613,7 +645,7 @@ public void testAddFailureStoreIndexWithAliases() { builder.put(original); createMetadataForIndices(builder, original.getIndices()); - createMetadataForIndices(builder, original.getFailureIndices()); + createMetadataForIndices(builder, original.getFailureIndices().getIndices()); Index indexToAdd = new Index(randomAlphaOfLength(4), UUIDs.randomBase64UUID(random())); IndexMetadata.Builder b = IndexMetadata.builder(indexToAdd.getName()) @@ -743,11 +775,16 @@ public void testSnapshot() { var replicated = preSnapshotDataStream.isReplicated() && randomBoolean(); var postSnapshotDataStream = preSnapshotDataStream.copy() - .setIndices(postSnapshotIndices) + .setBackingIndices( + preSnapshotDataStream.getBackingIndices() + .copy() + 
.setIndices(postSnapshotIndices) + .setRolloverOnWrite(replicated == false && preSnapshotDataStream.rolloverOnWrite()) + .build() + ) .setGeneration(preSnapshotDataStream.getGeneration() + randomIntBetween(0, 5)) .setMetadata(preSnapshotDataStream.getMetadata() == null ? null : new HashMap<>(preSnapshotDataStream.getMetadata())) .setReplicated(replicated) - .setRolloverOnWrite(replicated == false && preSnapshotDataStream.rolloverOnWrite()) .build(); var reconciledDataStream = postSnapshotDataStream.snapshot( @@ -775,7 +812,9 @@ public void testSnapshotWithAllBackingIndicesRemoved() { var preSnapshotDataStream = DataStreamTestHelper.randomInstance(); var indicesToAdd = randomNonEmptyIndexInstances(); - var postSnapshotDataStream = preSnapshotDataStream.copy().setIndices(indicesToAdd).build(); + var postSnapshotDataStream = preSnapshotDataStream.copy() + .setBackingIndices(preSnapshotDataStream.getBackingIndices().copy().setIndices(indicesToAdd).build()) + .build(); assertNull(postSnapshotDataStream.snapshot(preSnapshotDataStream.getIndices().stream().map(Index::getName).toList())); } @@ -1769,7 +1808,6 @@ public void testXContentSerializationWithRolloverAndEffectiveRetention() throws isSystem, randomBoolean(), isSystem, - System::currentTimeMillis, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests doesn't pass lifecycle, @@ -1958,12 +1996,11 @@ public void testWriteFailureIndex() { hidden, replicated, system, - System::currentTimeMillis, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, DataStreamLifecycleTests.randomLifecycle(), false, - null, + List.of(), replicated == false && randomBoolean(), null ); @@ -1977,7 +2014,6 @@ public void testWriteFailureIndex() { hidden, replicated, system, - System::currentTimeMillis, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, DataStreamLifecycleTests.randomLifecycle(), @@ -2003,7 +2039,6 @@ public void testWriteFailureIndex() { hidden, replicated, system, - System::currentTimeMillis, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, DataStreamLifecycleTests.randomLifecycle(), @@ -2028,12 +2063,11 @@ public void testIsFailureIndex() { hidden, replicated, system, - System::currentTimeMillis, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, DataStreamLifecycleTests.randomLifecycle(), false, - null, + List.of(), replicated == false && randomBoolean(), null ); @@ -2051,7 +2085,6 @@ public void testIsFailureIndex() { hidden, replicated, system, - System::currentTimeMillis, randomBoolean(), randomBoolean() ? IndexMode.STANDARD : IndexMode.TIME_SERIES, DataStreamLifecycleTests.randomLifecycle(), @@ -2083,7 +2116,6 @@ public void testIsFailureIndex() { hidden, replicated, system, - System::currentTimeMillis, randomBoolean(), randomBoolean() ? 
IndexMode.STANDARD : IndexMode.TIME_SERIES, DataStreamLifecycleTests.randomLifecycle(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java index 9a560abe20c74..d4639c3d3118e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsServiceTests.java @@ -357,7 +357,12 @@ public void testRemoveBrokenBackingIndexReference() { var state = DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStreamName, 2)), List.of()); var original = state.getMetadata().dataStreams().get(dataStreamName); var broken = original.copy() - .setIndices(List.of(new Index(original.getIndices().get(0).getName(), "broken"), original.getIndices().get(1))) + .setBackingIndices( + original.getBackingIndices() + .copy() + .setIndices(List.of(new Index(original.getIndices().get(0).getName(), "broken"), original.getIndices().get(1))) + .build() + ) .build(); var brokenState = ClusterState.builder(state).metadata(Metadata.builder(state.getMetadata()).put(broken).build()).build(); diff --git a/server/src/test/java/org/elasticsearch/common/time/Iso8601ParserTests.java b/server/src/test/java/org/elasticsearch/common/time/Iso8601ParserTests.java new file mode 100644 index 0000000000000..bfb03ea9496e5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/time/Iso8601ParserTests.java @@ -0,0 +1,427 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.time; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matcher; + +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.format.DateTimeParseException; +import java.time.format.ResolverStyle; +import java.time.format.SignStyle; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAccessor; +import java.time.temporal.TemporalQueries; +import java.time.temporal.ValueRange; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import static java.time.temporal.ChronoField.DAY_OF_MONTH; +import static java.time.temporal.ChronoField.HOUR_OF_DAY; +import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; +import static java.time.temporal.ChronoField.MONTH_OF_YEAR; +import static java.time.temporal.ChronoField.NANO_OF_SECOND; +import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; +import static java.time.temporal.ChronoField.YEAR; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class Iso8601ParserTests extends ESTestCase { + + private static Iso8601Parser defaultParser() { + return new Iso8601Parser(Set.of(), true, Map.of()); + } + + private static Matcher hasResult(DateTime dateTime) { + return transformedMatch(Iso8601Parser.Result::result, equalTo(dateTime)); + } + + private static Matcher hasError(int parseError) { + return transformedMatch(Iso8601Parser.Result::errorIndex, equalTo(parseError)); + } + + public void testStrangeParses() { + assertThat(defaultParser().tryParse("-9999-01-01", null), hasResult(new DateTime(-9999, 1, 1, null, null, null, null, null, null))); + assertThat(defaultParser().tryParse("1000", null), hasResult(new DateTime(1000, null, null, null, null, null, null, null, null))); + assertThat(defaultParser().tryParse("2023-02-02T", null), hasResult(new DateTime(2023, 2, 2, null, null, null, null, null, null))); + + // these are accepted by the previous formatters, but are not valid ISO8601 + assertThat(defaultParser().tryParse("2023-01-01T12:00:00.01,02", null), hasError(22)); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00Europe/Paris+0400", null), hasError(19)); + } + + public void testOutOfRange() { + assertThat(defaultParser().tryParse("2023-13-12", null), hasError(5)); + assertThat(defaultParser().tryParse("2023-12-32", null), hasError(8)); + assertThat(defaultParser().tryParse("2023-12-31T24", null), hasError(11)); + assertThat(defaultParser().tryParse("2023-12-31T23:60", null), hasError(14)); + assertThat(defaultParser().tryParse("2023-12-31T23:59:60", null), hasError(17)); + assertThat(defaultParser().tryParse("2023-12-31T23:59:59+18:30", null), hasError(19)); + } + + public void testMandatoryFields() { + assertThat( + new Iso8601Parser(Set.of(YEAR), true, Map.of()).tryParse("2023", null), + hasResult(new DateTime(2023, null, null, null, null, null, null, null, null)) + ); + assertThat(new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR), true, Map.of()).tryParse("2023", null), hasError(4)); + + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR), true, Map.of()).tryParse("2023-06", null), + hasResult(new DateTime(2023, 6, null, null, null, null, null, null, null)) + ); + assertThat(new 
Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH), true, Map.of()).tryParse("2023-06", null), hasError(7)); + + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH), true, Map.of()).tryParse("2023-06-20", null), + hasResult(new DateTime(2023, 6, 20, null, null, null, null, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY), false, Map.of()).tryParse("2023-06-20", null), + hasError(10) + ); + + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY), false, Map.of()).tryParse("2023-06-20T15", null), + hasResult(new DateTime(2023, 6, 20, 15, 0, 0, 0, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, Map.of()).tryParse( + "2023-06-20T15", + null + ), + hasError(13) + ); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, Map.of()).tryParse( + "2023-06-20T15Z", + null + ), + hasError(13) + ); + + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR), false, Map.of()).tryParse( + "2023-06-20T15:48", + null + ), + hasResult(new DateTime(2023, 6, 20, 15, 48, 0, 0, null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), false, Map.of()) + .tryParse("2023-06-20T15:48", null), + hasError(16) + ); + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), false, Map.of()) + .tryParse("2023-06-20T15:48Z", null), + hasError(16) + ); + + assertThat( + new Iso8601Parser(Set.of(YEAR, MONTH_OF_YEAR, DAY_OF_MONTH, HOUR_OF_DAY, MINUTE_OF_HOUR, SECOND_OF_MINUTE), false, Map.of()) + .tryParse("2023-06-20T15:48:09", null), + hasResult(new DateTime(2023, 6, 20, 15, 48, 9, 0, null, null)) + ); + } + + public void testParseNanos() { + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00.5", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 500_000_000, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00,5", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 500_000_000, null, null)) + ); + + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00.05", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 50_000_000, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00,005", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 5_000_000, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00.0005", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 500_000, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00,00005", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 50_000, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00.000005", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 5_000, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00,0000005", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 500, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00.00000005", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 50, null, null)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00,000000005", null), + hasResult(new DateTime(2023, 1, 1, 12, 0, 0, 5, null, null)) + ); + + // too many nanos + 
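+ // error index 29 is the tenth fractional-second digit: nano-of-second precision allows at most nine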
assertThat(defaultParser().tryParse("2023-01-01T12:00:00.0000000005", null), hasError(29)); + } + + private static Matcher hasTimezone(ZoneId offset) { + return transformedMatch(r -> r.result().query(TemporalQueries.zone()), equalTo(offset)); + } + + public void testParseTimezones() { + // using defaults + assertThat(defaultParser().tryParse("2023-01-01T12:00:00", null), hasTimezone(null)); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00", ZoneOffset.UTC), hasTimezone(ZoneOffset.UTC)); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00", ZoneOffset.ofHours(-3)), hasTimezone(ZoneOffset.ofHours(-3))); + + // timezone specified + assertThat(defaultParser().tryParse("2023-01-01T12:00:00Z", null), hasTimezone(ZoneOffset.UTC)); + + assertThat(defaultParser().tryParse("2023-01-01T12:00:00-05", null), hasTimezone(ZoneOffset.ofHours(-5))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00+11", null), hasTimezone(ZoneOffset.ofHours(11))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00+0830", null), hasTimezone(ZoneOffset.ofHoursMinutes(8, 30))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00-0415", null), hasTimezone(ZoneOffset.ofHoursMinutes(-4, -15))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00+08:30", null), hasTimezone(ZoneOffset.ofHoursMinutes(8, 30))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00-04:15", null), hasTimezone(ZoneOffset.ofHoursMinutes(-4, -15))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00+011030", null), hasTimezone(ZoneOffset.ofHoursMinutesSeconds(1, 10, 30))); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00-074520", null), + hasTimezone(ZoneOffset.ofHoursMinutesSeconds(-7, -45, -20)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00+01:10:30", null), + hasTimezone(ZoneOffset.ofHoursMinutesSeconds(1, 10, 30)) + ); + assertThat( + defaultParser().tryParse("2023-01-01T12:00:00-07:45:20", null), + hasTimezone(ZoneOffset.ofHoursMinutesSeconds(-7, -45, -20)) + ); + + assertThat(defaultParser().tryParse("2023-01-01T12:00:00GMT", null), hasTimezone(ZoneId.of("GMT"))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00UTC", null), hasTimezone(ZoneId.of("UTC"))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00UT", null), hasTimezone(ZoneId.of("UT"))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00GMT+3", null), hasTimezone(ZoneId.of("GMT+3"))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00UTC-4", null), hasTimezone(ZoneId.of("UTC-4"))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00UT+6", null), hasTimezone(ZoneId.of("UT+6"))); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00Europe/Paris", null), hasTimezone(ZoneId.of("Europe/Paris"))); + + // we could be more specific in the error index for invalid timezones, + // but that would require keeping track & propagating Result objects within date-time parsing just for the ZoneId + assertThat(defaultParser().tryParse("2023-01-01T12:00:00+04:0030", null), hasError(19)); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00+0400:30", null), hasError(19)); + assertThat(defaultParser().tryParse("2023-01-01T12:00:00Invalid", null), hasError(19)); + } + + private static void assertEquivalent(String text, DateTimeFormatter formatter) { + TemporalAccessor expected = formatter.parse(text); + TemporalAccessor actual = defaultParser().tryParse(text, null).result(); + assertThat(actual, is(notNullValue())); + + 
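+ // the custom parser's TemporalAccessor is a different implementation from java.time's, so equivalence is checked field-by-field via TemporalQueries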
assertThat(actual.query(TemporalQueries.localDate()), equalTo(expected.query(TemporalQueries.localDate()))); + assertThat(actual.query(TemporalQueries.localTime()), equalTo(expected.query(TemporalQueries.localTime()))); + assertThat(actual.query(TemporalQueries.zone()), equalTo(expected.query(TemporalQueries.zone()))); + } + + private static void assertEquivalentFailure(String text, DateTimeFormatter formatter) { + DateTimeParseException expected = expectThrows(DateTimeParseException.class, () -> formatter.parse(text)); + int error = defaultParser().tryParse(text, null).errorIndex(); + assertThat(error, greaterThanOrEqualTo(0)); + + assertThat(error, equalTo(expected.getErrorIndex())); + } + + public void testEquivalence() { + // test that Iso8601Parser produces the same output as DateTimeFormatter + DateTimeFormatter mandatoryFormatter = new DateTimeFormatterBuilder().append(DateTimeFormatter.ISO_LOCAL_DATE_TIME) + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalStart() + .appendOffset("+HHmm", "Z") + .optionalEnd() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT); + + // just checking timezones/ids here + assertEquivalent("2023-01-01T12:00:00", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00Z", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00UT", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00UTC", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00GMT", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00+00", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00-00", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00+05", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00+0500", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00+05:00", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00+05:00:30", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00-07", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00-0715", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00-07:15", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00UTC+05:00", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00GMT-09:45:30", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00Zulu", mandatoryFormatter); + assertEquivalent("2023-01-01T12:00:00Europe/Paris", mandatoryFormatter); + + assertEquivalentFailure("2023-01-01T12:00:00+5", mandatoryFormatter); + assertEquivalentFailure("2023-01-01T12:00:00-7", mandatoryFormatter); + assertEquivalentFailure("2023-01-01T12:00:00InvalidTimeZone", mandatoryFormatter); + + DateTimeFormatter allFieldsOptional = new DateTimeFormatterBuilder().appendValue(YEAR, 4, 4, SignStyle.EXCEEDS_PAD) + .optionalStart() + .appendLiteral('-') + .appendValue(MONTH_OF_YEAR, 2) + .optionalStart() + .appendLiteral('-') + .appendValue(DAY_OF_MONTH, 2) + .optionalStart() + .appendLiteral('T') + .appendValue(HOUR_OF_DAY, 2) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2) + .optionalEnd() + .optionalEnd() + .optionalEnd() + .optionalEnd() + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalStart() + .appendOffset("+HHmm", "Z") + .optionalEnd() + .toFormatter(Locale.ROOT) + .withResolverStyle(ResolverStyle.STRICT); + + assertEquivalent("2023", allFieldsOptional); + assertEquivalent("2023-04", allFieldsOptional); + assertEquivalent("2023-04-08", allFieldsOptional); + 
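+ // each longer string exercises one more optional section; the negative-year case below relies on SignStyle.EXCEEDS_PAD accepting a leading sign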
assertEquivalent("2023-04-08T13", allFieldsOptional); + assertEquivalent("2023-04-08T13:45", allFieldsOptional); + assertEquivalent("2023-04-08T13:45:50", allFieldsOptional); + assertEquivalent("-2023-04-08T13:45:50", allFieldsOptional); + } + + private static int randomValue(ValueRange range) { + assert range.isIntValue(); + return randomIntBetween((int) range.getMinimum(), (int) range.getMaximum()); + } + + public void testDefaults() { + Map defaults = Map.of( + MONTH_OF_YEAR, + randomValue(MONTH_OF_YEAR.range()), + DAY_OF_MONTH, + randomValue(DAY_OF_MONTH.range()), + HOUR_OF_DAY, + randomValue(HOUR_OF_DAY.range()), + MINUTE_OF_HOUR, + randomValue(MINUTE_OF_HOUR.range()), + SECOND_OF_MINUTE, + randomValue(SECOND_OF_MINUTE.range()), + NANO_OF_SECOND, + randomValue(NANO_OF_SECOND.range()) + ); + + assertThat( + new Iso8601Parser(Set.of(), true, defaults).tryParse("2023", null), + hasResult( + new DateTime( + 2023, + defaults.get(MONTH_OF_YEAR), + defaults.get(DAY_OF_MONTH), + defaults.get(HOUR_OF_DAY), + defaults.get(MINUTE_OF_HOUR), + defaults.get(SECOND_OF_MINUTE), + defaults.get(NANO_OF_SECOND), + null, + null + ) + ) + ); + assertThat( + new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01", null), + hasResult( + new DateTime( + 2023, + 1, + defaults.get(DAY_OF_MONTH), + defaults.get(HOUR_OF_DAY), + defaults.get(MINUTE_OF_HOUR), + defaults.get(SECOND_OF_MINUTE), + defaults.get(NANO_OF_SECOND), + null, + null + ) + ) + ); + assertThat( + new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01", null), + hasResult( + new DateTime( + 2023, + 1, + 1, + defaults.get(HOUR_OF_DAY), + defaults.get(MINUTE_OF_HOUR), + defaults.get(SECOND_OF_MINUTE), + defaults.get(NANO_OF_SECOND), + null, + null + ) + ) + ); + assertThat( + new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01T00", null), + hasResult( + new DateTime( + 2023, + 1, + 1, + 0, + defaults.get(MINUTE_OF_HOUR), + defaults.get(SECOND_OF_MINUTE), + defaults.get(NANO_OF_SECOND), + null, + null + ) + ) + ); + assertThat( + new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01T00:00", null), + hasResult(new DateTime(2023, 1, 1, 0, 0, defaults.get(SECOND_OF_MINUTE), defaults.get(NANO_OF_SECOND), null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01T00:00:00", null), + hasResult(new DateTime(2023, 1, 1, 0, 0, 0, defaults.get(NANO_OF_SECOND), null, null)) + ); + assertThat( + new Iso8601Parser(Set.of(), true, defaults).tryParse("2023-01-01T00:00:00.0", null), + hasResult(new DateTime(2023, 1, 1, 0, 0, 0, 0, null, null)) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index 6df9fd1f35f52..c02df8336a66d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -164,7 +164,15 @@ private static FieldMapper createFieldMapper(String parent, String name) { } private static ObjectMapper createObjectMapper(String name) { - return new ObjectMapper(name, name, Explicit.IMPLICIT_TRUE, Explicit.IMPLICIT_TRUE, ObjectMapper.Dynamic.FALSE, emptyMap()); + return new ObjectMapper( + name, + name, + Explicit.IMPLICIT_TRUE, + Explicit.IMPLICIT_TRUE, + Explicit.IMPLICIT_FALSE, + ObjectMapper.Dynamic.FALSE, + emptyMap() + ); } private static NestedObjectMapper 
createNestedObjectMapper(String name) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index 0308dac5fa216..65fa4e236bafc 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -82,6 +82,7 @@ public void testSubfieldOverride() { "object", Explicit.EXPLICIT_TRUE, Explicit.IMPLICIT_TRUE, + Explicit.IMPLICIT_FALSE, ObjectMapper.Dynamic.TRUE, Collections.singletonMap("object.subfield", fieldMapper) ); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 154132c772927..69848e3b93f90 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -165,6 +165,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { assertNotNull(objectMapper); assertFalse(objectMapper.isEnabled()); assertTrue(objectMapper.subobjects()); + assertFalse(objectMapper.trackArraySource()); // Setting 'enabled' to true is allowed, and updates the mapping. update = Strings.toString( @@ -175,6 +176,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { .field("type", "object") .field("enabled", true) .field("subobjects", false) + .field(ObjectMapper.STORE_ARRAY_SOURCE_PARAM, true) .endObject() .endObject() .endObject() @@ -185,6 +187,7 @@ public void testMergeEnabledForIndexTemplates() throws IOException { assertNotNull(objectMapper); assertTrue(objectMapper.isEnabled()); assertFalse(objectMapper.subobjects()); + assertTrue(objectMapper.trackArraySource()); } public void testFieldReplacementForIndexTemplates() throws IOException { @@ -573,6 +576,7 @@ private ObjectMapper createObjectMapperWithAllParametersSet(CheckedConsumer { private String key; private String value; - Request() {} - Request(StreamInput in) throws IOException { super(in); index = in.readString(); @@ -79,6 +78,7 @@ public static class Request extends MasterNodeRequest { } public Request(final String index, final String key, final String value) { + super(TimeValue.THIRTY_SECONDS); this.index = index; this.key = key; this.value = value; diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 5a736b4e1e9dd..bfbd92bd5df22 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -20,7 +21,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import 
org.elasticsearch.common.blobstore.BlobPath; @@ -29,8 +29,8 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -41,11 +41,15 @@ import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.junit.AfterClass; +import org.junit.BeforeClass; import java.util.Arrays; import java.util.Collection; @@ -58,21 +62,31 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isA; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class RepositoriesServiceTests extends ESTestCase { + private static ThreadPool threadPool; + + private ClusterService clusterService; private RepositoriesService repositoriesService; + @BeforeClass + public static void createThreadPool() { + threadPool = new TestThreadPool(RepositoriesService.class.getName()); + } + + @AfterClass + public static void terminateThreadPool() { + if (threadPool != null) { + threadPool.shutdownNow(); + threadPool = null; + } + } + @Override public void setUp() throws Exception { super.setUp(); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - ThreadPool threadPool = mock(ThreadPool.class); - when(threadPool.getThreadContext()).thenReturn(threadContext); - when(threadPool.info(ThreadPool.Names.SNAPSHOT)).thenReturn( - new ThreadPool.Info(ThreadPool.Names.SNAPSHOT, ThreadPool.ThreadPoolType.FIXED, randomIntBetween(1, 10)) - ); + final TransportService transportService = new TransportService( Settings.EMPTY, mock(Transport.class), @@ -82,10 +96,18 @@ public void setUp() throws Exception { null, Collections.emptySet() ); - final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class); - when(clusterApplierService.threadPool()).thenReturn(threadPool); - final ClusterService clusterService = mock(ClusterService.class); - when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService); + + clusterService = ClusterServiceUtils.createClusterService(threadPool); + + // cluster utils publisher does not call AckListener, making some method calls hang indefinitely + // in this test we have a single master node, and it acknowledges cluster state immediately + final var publisher = ClusterServiceUtils.createClusterStatePublisher(clusterService.getClusterApplierService()); + clusterService.getMasterService().setClusterStatePublisher((evt, pub, ack) -> { + publisher.publish(evt, pub, ack); + ack.onCommit(TimeValue.ZERO); + ack.onNodeAck(clusterService.localNode(), null); + }); + Map typesRegistry = Map.of( TestRepository.TYPE, TestRepository::new, @@ -98,16 +120,25 @@ public void setUp() throws Exception { ); repositoriesService = new RepositoriesService( Settings.EMPTY, 
- mock(ClusterService.class), + clusterService, transportService, typesRegistry, typesRegistry, threadPool, List.of() ); + + clusterService.start(); repositoriesService.start(); } + @Override + public void tearDown() throws Exception { + super.tearDown(); + clusterService.stop(); + repositoriesService.stop(); + } + public void testRegisterInternalRepository() { String repoName = "name"; expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); @@ -283,18 +314,11 @@ public void testRegisterRepositorySuccessAfterCreationFailed() { // 2. repository creation successfully when current node become master node and repository is put again var request = new PutRepositoryRequest().name(repoName).type(TestRepository.TYPE); - repositoriesService.registerRepository(request, new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse acknowledgedResponse) { - assertTrue(acknowledgedResponse.isAcknowledged()); - assertThat(repositoriesService.repository(repoName), isA(TestRepository.class)); - } - - @Override - public void onFailure(Exception e) { - assert false : e; - } - }); + var resultListener = new SubscribableListener(); + repositoriesService.registerRepository(request, resultListener); + var response = safeAwait(resultListener); + assertTrue(response.isAcknowledged()); + assertThat(repositoriesService.repository(repoName), isA(TestRepository.class)); } private ClusterState createClusterStateWithRepo(String repoName, String repoType) { @@ -320,11 +344,10 @@ private void assertThrowsOnRegister(String repoName) { private static class TestRepository implements Repository { private static final String TYPE = "internal"; + private final RepositoryMetadata metadata; private boolean isClosed; private boolean isStarted; - private final RepositoryMetadata metadata; - private TestRepository(RepositoryMetadata metadata) { this.metadata = metadata; } @@ -357,7 +380,7 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna @Override public void getRepositoryData(Executor responseExecutor, ActionListener listener) { - listener.onResponse(null); + listener.onResponse(RepositoryData.EMPTY); } @Override diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index adfc333e9dc7e..e18e327734495 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -47,6 +48,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -56,6 +58,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import 
java.util.Map; import java.util.concurrent.BlockingQueue; @@ -71,6 +74,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -494,4 +498,41 @@ private Environment createEnvironment() { .build() ); } + + public void testShardBlobsToDelete() { + final var repo = setupRepo(); + final var shardBlobsToDelete = repo.new ShardBlobsToDelete(); + final var expectedShardGenerations = ShardGenerations.builder(); + final var expectedBlobsToDelete = new HashSet(); + + final var countDownLatch = new CountDownLatch(1); + try (var refs = new RefCountingRunnable(countDownLatch::countDown)) { + for (int index = between(0, 10); index > 0; index--) { + final var indexId = new IndexId(randomIdentifier(), randomUUID()); + for (int shard = between(1, 3); shard > 0; shard--) { + final var shardId = shard; + final var shardGeneration = new ShardGeneration(randomUUID()); + expectedShardGenerations.put(indexId, shard, shardGeneration); + final var blobsToDelete = randomList(10, ESTestCase::randomIdentifier); + final var indexPath = repo.basePath().add("indices").add(indexId.getId()).add(Integer.toString(shard)).buildAsString(); + for (final var blobToDelete : blobsToDelete) { + expectedBlobsToDelete.add(indexPath + blobToDelete); + } + + repo.threadPool() + .generic() + .execute( + ActionRunnable.run( + refs.acquireListener(), + () -> shardBlobsToDelete.addShardDeleteResult(indexId, shardId, shardGeneration, blobsToDelete) + ) + ); + } + } + } + safeAwait(countDownLatch); + assertEquals(expectedShardGenerations.build(), shardBlobsToDelete.getUpdatedShardGenerations()); + shardBlobsToDelete.getBlobPaths().forEachRemaining(s -> assertTrue(expectedBlobsToDelete.remove(s))); + assertThat(expectedBlobsToDelete, empty()); + } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java b/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java index a0ad31c65c8b8..c92b0b0bf15d2 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/ReservedClusterStateHandlerTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.settings.InternalOrPrivateSettingsPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -43,6 +44,10 @@ public ValidRequest fromXContent(XContentParser parser) throws IOException { } static class ValidRequest extends MasterNodeRequest { + ValidRequest() { + super(TimeValue.THIRTY_SECONDS); + } + @Override public ActionRequestValidationException validate() { return null; diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java index 5c034a81fc9cd..e693f9a1562fd 100644 --- 
a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java +++ b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java @@ -269,6 +269,7 @@ public void testManyEval() throws IOException { assertMap(map, matchesMap().entry("columns", columns).entry("values", hasSize(10_000))); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-serverless/issues/1874") public void testTooManyEval() throws IOException { initManyLongs(); assertCircuitBreaks(() -> manyEval(490)); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index bbbafef514e30..c78ed54c13d8f 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -131,13 +131,10 @@ public static DataStream newInstance( @Nullable DataStreamLifecycle lifecycle, @Nullable DataStreamAutoShardingEvent autoShardingEvent ) { - return DataStream.builder(name, indices) - .setGeneration(generation) - .setMetadata(metadata) - .setReplicated(replicated) - .setLifecycle(lifecycle) - .setAutoShardingEvent(autoShardingEvent) - .build(); + return DataStream.builder( + name, + DataStream.DataStreamIndices.backingIndicesBuilder(indices).setAutoShardingEvent(autoShardingEvent).build() + ).setGeneration(generation).setMetadata(metadata).setReplicated(replicated).setLifecycle(lifecycle).build(); } public static DataStream newInstance( @@ -155,7 +152,7 @@ public static DataStream newInstance( .setReplicated(replicated) .setLifecycle(lifecycle) .setFailureStoreEnabled(failureStores.isEmpty() == false) - .setFailureIndices(failureStores) + .setFailureIndices(DataStream.DataStreamIndices.failureIndicesBuilder(failureStores).build()) .build(); } @@ -341,7 +338,6 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time boolean replicated = randomBoolean(); return new DataStream( dataStreamName, - indices, generation, metadata, randomBoolean(), @@ -352,15 +348,30 @@ public static DataStream randomInstance(String dataStreamName, LongSupplier time randomBoolean() ? IndexMode.STANDARD : null, // IndexMode.TIME_SERIES triggers validation that many unit tests doesn't pass randomBoolean() ? DataStreamLifecycle.newBuilder().dataRetention(randomMillisUpToYear9999()).build() : null, failureStore, - failureIndices, - replicated == false && randomBoolean(), - randomBoolean() - ? new DataStreamAutoShardingEvent( - indices.get(indices.size() - 1).getName(), - randomIntBetween(1, 10), - randomMillisUpToYear9999() + DataStream.DataStreamIndices.backingIndicesBuilder(indices) + .setRolloverOnWrite(replicated == false && randomBoolean()) + .setAutoShardingEvent( + randomBoolean() + ? new DataStreamAutoShardingEvent( + indices.get(indices.size() - 1).getName(), + randomIntBetween(1, 10), + randomMillisUpToYear9999() + ) + : null ) - : null + .build(), + DataStream.DataStreamIndices.failureIndicesBuilder(failureIndices) + .setRolloverOnWrite(failureStore && replicated == false && randomBoolean()) + .setAutoShardingEvent( + failureStore && randomBoolean() + ? 
new DataStreamAutoShardingEvent( + indices.get(indices.size() - 1).getName(), + randomIntBetween(1, 10), + randomMillisUpToYear9999() + ) + : null + ) + .build() ); } diff --git a/test/framework/src/main/java/org/elasticsearch/common/lucene/store/ESIndexInputTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/lucene/store/ESIndexInputTestCase.java index 2b4e7fd4c7517..63b7dd88cb44e 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/lucene/store/ESIndexInputTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/lucene/store/ESIndexInputTestCase.java @@ -74,36 +74,45 @@ protected byte[] randomReadAndSlice(IndexInput indexInput, int length) throws IO switch (readStrategy) { case 0, 1, 2, 3: if (length - readPos >= Long.BYTES && readStrategy <= 0) { - long read = indexInput.readLong(); - ByteBuffer.wrap(output, readPos, Long.BYTES).order(ByteOrder.LITTLE_ENDIAN).putLong(read); + ByteBuffer.wrap(output, readPos, Long.BYTES).order(ByteOrder.LITTLE_ENDIAN).putLong(indexInput.readLong()); readPos += Long.BYTES; - if (indexInput instanceof RandomAccessInput randomAccessInput) { - assertEquals(read, randomAccessInput.readLong(indexInput.getFilePointer() - Long.BYTES)); - indexInput.seek(readPos); - } } else if (length - readPos >= Integer.BYTES && readStrategy <= 1) { - int read = indexInput.readInt(); - ByteBuffer.wrap(output, readPos, Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN).putInt(read); + ByteBuffer.wrap(output, readPos, Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN).putInt(indexInput.readInt()); readPos += Integer.BYTES; - if (indexInput instanceof RandomAccessInput randomAccessInput) { - assertEquals(read, randomAccessInput.readInt(indexInput.getFilePointer() - Integer.BYTES)); - indexInput.seek(readPos); - } } else if (length - readPos >= Short.BYTES && readStrategy <= 2) { - short read = indexInput.readShort(); - ByteBuffer.wrap(output, readPos, Short.BYTES).order(ByteOrder.LITTLE_ENDIAN).putShort(read); + ByteBuffer.wrap(output, readPos, Short.BYTES).order(ByteOrder.LITTLE_ENDIAN).putShort(indexInput.readShort()); readPos += Short.BYTES; - if (indexInput instanceof RandomAccessInput randomAccessInput) { - assertEquals(read, randomAccessInput.readShort(indexInput.getFilePointer() - Short.BYTES)); - indexInput.seek(readPos); - } } else { - byte read = indexInput.readByte(); - output[readPos++] = read; - if (indexInput instanceof RandomAccessInput randomAccessInput) { - assertEquals(read, randomAccessInput.readByte(indexInput.getFilePointer() - 1)); + output[readPos++] = indexInput.readByte(); + } + if (indexInput instanceof RandomAccessInput randomAccessInput && randomBoolean()) { + final var randomAccessReadStart = between(0, length - 1); + final int randomAccessReadEnd; + if (length - randomAccessReadStart >= Long.BYTES && randomBoolean()) { + ByteBuffer.wrap(output, randomAccessReadStart, Long.BYTES) + .order(ByteOrder.LITTLE_ENDIAN) + .putLong(randomAccessInput.readLong(randomAccessReadStart)); + randomAccessReadEnd = randomAccessReadStart + Long.BYTES; + } else if (length - randomAccessReadStart >= Integer.BYTES && randomBoolean()) { + ByteBuffer.wrap(output, randomAccessReadStart, Integer.BYTES) + .order(ByteOrder.LITTLE_ENDIAN) + .putInt(randomAccessInput.readInt(randomAccessReadStart)); + randomAccessReadEnd = randomAccessReadStart + Integer.BYTES; + } else if (length - randomAccessReadStart >= Short.BYTES && randomBoolean()) { + ByteBuffer.wrap(output, randomAccessReadStart, Short.BYTES) + .order(ByteOrder.LITTLE_ENDIAN) + 
.putShort(randomAccessInput.readShort(randomAccessReadStart)); + randomAccessReadEnd = randomAccessReadStart + Short.BYTES; + } else { + output[randomAccessReadStart] = randomAccessInput.readByte(randomAccessReadStart); + randomAccessReadEnd = randomAccessReadStart + 1; + } + if (randomAccessReadStart <= readPos && readPos <= randomAccessReadEnd && randomBoolean()) { + readPos = between(readPos, randomAccessReadEnd); indexInput.seek(readPos); } + + indexInput.seek(readPos); // BUG these random-access reads shouldn't affect the current position } break; case 4: diff --git a/test/framework/src/main/java/org/elasticsearch/indices/SystemIndexThreadPoolTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/SystemIndexThreadPoolTestCase.java deleted file mode 100644 index e105d61f7ee0a..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/indices/SystemIndexThreadPoolTestCase.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.indices; - -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Phaser; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.startsWith; - -/** - * Tests to verify that system indices are bypassing user-space thread pools - * - *
<p>We can block thread pools by setting them to one thread and no queue, then submitting - * threads that wait on a countdown latch. This lets us verify that operations on system indices - * are being directed to other thread pools.</p>
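The blocking technique described above boils down to the following Phaser-based pattern. This is a minimal sketch using a plain `ExecutorService` stand-in rather than the Elasticsearch `ThreadPool`; all names here are illustrative, not part of the test framework.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Phaser;

// Sketch: park every worker of a fixed-size pool on a Phaser, run a probe
// while the pool is saturated, then release the workers again.
public final class BlockedPoolSketch {

    static void runWhileBlocked(ExecutorService pool, int poolSize, Runnable probe) {
        Phaser phaser = new Phaser(1); // register the coordinating (test) thread
        phaser.bulkRegister(poolSize); // one party per pool worker
        for (int i = 0; i < poolSize; i++) {
            pool.submit(() -> {
                phaser.arriveAndAwaitAdvance(); // signal "this worker is parked"
                phaser.arriveAndAwaitAdvance(); // wait here until the probe has run
            });
        }
        phaser.arriveAndAwaitAdvance(); // returns once every worker is parked
        try {
            probe.run(); // the pool's threads are all blocked while this runs
        } finally {
            phaser.arriveAndAwaitAdvance(); // release the workers
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(1);
        runWhileBlocked(pool, 1, () -> System.out.println("pool is blocked here"));
        pool.shutdown();
    }
}
```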
- * - * <p>When implementing this class, don't forget to override {@link ESIntegTestCase#nodePlugins()} if - * the relevant system index is defined in a plugin.</p>
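Concretely, the override mentioned here would look roughly like the following in a subclass of the (now removed) helper; `MyPlugin` and the test class name are hypothetical:

```java
import java.util.Collection;

import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin;

// Hypothetical integration test: registers the plugin that defines the
// system index under test, as the javadoc above advises.
public class MySystemIndexThreadPoolIT extends SystemIndexThreadPoolTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return CollectionUtils.appendToCopy(super.nodePlugins(), MyPlugin.class);
    }
}
```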
- */ -public abstract class SystemIndexThreadPoolTestCase extends ESIntegTestCase { - - private static final String USER_INDEX = "user_index"; - - // For system indices that use ExecutorNames.CRITICAL_SYSTEM_INDEX_THREAD_POOLS, we'll want to - // block normal system index thread pools as well. - protected Set threadPoolsToBlock() { - return Set.of(ThreadPool.Names.GET, ThreadPool.Names.WRITE, ThreadPool.Names.SEARCH); - } - - protected void runWithBlockedThreadPools(Runnable runnable) { - Phaser phaser = new Phaser(); - Runnable waitAction = () -> { - phaser.arriveAndAwaitAdvance(); - phaser.arriveAndAwaitAdvance(); - }; - phaser.register(); // register this test's thread - - for (String nodeName : internalCluster().getNodeNames()) { - ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeName); - for (String threadPoolName : threadPoolsToBlock()) { - ThreadPool.Info info = threadPool.info(threadPoolName); - phaser.bulkRegister(info.getMax()); - for (int i = 0; i < info.getMax(); i++) { - threadPool.executor(threadPoolName).submit(waitAction); - } - } - } - phaser.arriveAndAwaitAdvance(); - try { - runnable.run(); - } finally { - phaser.arriveAndAwaitAdvance(); - } - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/107625") - public void testUserThreadPoolsAreBlocked() { - assertAcked(client().admin().indices().prepareCreate(USER_INDEX)); - - runWithBlockedThreadPools(this::assertThreadPoolsBlocked); - - assertAcked(client().admin().indices().prepareDelete(USER_INDEX)); - } - - private void assertThreadPoolsBlocked() { - fillThreadPoolQueues(); // rejections are easier to check than timeouts - - var e1 = expectThrows( - EsRejectedExecutionException.class, - () -> client().prepareIndex(USER_INDEX).setSource(Map.of("foo", "bar")).get() - ); - assertThat(e1.getMessage(), startsWith("rejected execution of TimedRunnable")); - var e2 = expectThrows(EsRejectedExecutionException.class, () -> client().prepareGet(USER_INDEX, "id").get()); - assertThat(e2.getMessage(), startsWith("rejected execution of ActionRunnable")); - var e3 = expectThrows( - SearchPhaseExecutionException.class, - () -> client().prepareSearch(USER_INDEX) - .setQuery(QueryBuilders.matchAllQuery()) - // Request times out if max concurrent shard requests is set to 1 - .setMaxConcurrentShardRequests(usually() ? 
SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) - .get() - ); - assertThat(e3.getMessage(), containsString("all shards failed")); - } - - private void fillThreadPoolQueues() { - for (String nodeName : internalCluster().getNodeNames()) { - ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class, nodeName); - for (String threadPoolName : threadPoolsToBlock()) { - ThreadPool.Info info = threadPool.info(threadPoolName); - - // fill up the queue - for (int i = 0; i < info.getQueueSize().singles(); i++) { - try { - threadPool.executor(threadPoolName).submit(() -> {}); - } catch (EsRejectedExecutionException e) { - // we can't be sure that some other task won't get queued in a test cluster - // but we should put all the tasks in there anyway - } - } - } - } - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 83f7fdfe386c7..80f9f2abea184 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -2101,9 +2101,24 @@ protected static SecureRandom secureRandomFips(final byte[] seed) throws NoSuchA return secureRandomFips; } + /** + * The timeout used for the various "safe" wait methods such as {@link #safeAwait} and {@link #safeAcquire}. In tests we generally want + * these things to complete almost immediately, but sometimes the CI runner executes things rather slowly so we use {@code 10s} as a + * fairly relaxed definition of "immediately". + *
<p> + * A well-designed test should not need to wait for anything close to this duration when run in isolation. If you think you need to do + * so, instead seek a better way to write the test such that it does not need to wait for so long. Tests that take multiple seconds to + * complete are a big drag on CI times which slows everyone down. + * <p>
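The "blocking waits" this guidance prefers are condensed below; a sketch only, mirroring the shape of the `safeAwait(CountDownLatch)` helper updated in this change:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Sketch: block until the condition is signalled, bounded by the relaxed
// ten-second ceiling described above, failing loudly on timeout or interrupt.
static void awaitOrFail(CountDownLatch latch) {
    try {
        if (latch.await(10, TimeUnit.SECONDS) == false) {
            throw new AssertionError("condition was not satisfied within the timeout");
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AssertionError("interrupted while waiting for the condition", e);
    }
}
```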
+ * For instance, tests which verify things that require the passage of time ought to simulate this (e.g. using a {@link + * org.elasticsearch.common.util.concurrent.DeterministicTaskQueue}). Excessive busy-waits ought to be replaced by blocking waits (e.g. + * using a {@link CountDownLatch}) which release as soon as the condition is satisfied. + */ + public static final TimeValue SAFE_AWAIT_TIMEOUT = TimeValue.timeValueSeconds(10); + public static void safeAwait(CyclicBarrier barrier) { try { - barrier.await(10, TimeUnit.SECONDS); + barrier.await(SAFE_AWAIT_TIMEOUT.millis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); fail(e, "safeAwait: interrupted waiting for CyclicBarrier release"); @@ -2114,7 +2129,10 @@ public static void safeAwait(CyclicBarrier barrier) { public static void safeAwait(CountDownLatch countDownLatch) { try { - assertTrue("safeAwait: CountDownLatch did not reach zero within the timeout", countDownLatch.await(10, TimeUnit.SECONDS)); + assertTrue( + "safeAwait: CountDownLatch did not reach zero within the timeout", + countDownLatch.await(SAFE_AWAIT_TIMEOUT.millis(), TimeUnit.MILLISECONDS) + ); } catch (InterruptedException e) { Thread.currentThread().interrupt(); fail(e, "safeAwait: interrupted waiting for CountDownLatch to reach zero"); @@ -2123,7 +2141,10 @@ public static void safeAwait(CountDownLatch countDownLatch) { public static void safeAcquire(Semaphore semaphore) { try { - assertTrue("safeAcquire: Semaphore did not acquire permit within the timeout", semaphore.tryAcquire(10, TimeUnit.SECONDS)); + assertTrue( + "safeAcquire: Semaphore did not acquire permit within the timeout", + semaphore.tryAcquire(SAFE_AWAIT_TIMEOUT.millis(), TimeUnit.MILLISECONDS) + ); } catch (InterruptedException e) { Thread.currentThread().interrupt(); fail(e, "safeAcquire: interrupted waiting for Semaphore to acquire permit"); @@ -2134,7 +2155,7 @@ public static T safeAwait(SubscribableListener listener) { final var future = new PlainActionFuture(); listener.addListener(future); try { - return future.get(10, TimeUnit.SECONDS); + return future.get(SAFE_AWAIT_TIMEOUT.millis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new AssertionError("safeAwait: interrupted waiting for SubscribableListener", e); diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index bc3723119afa9..dd7987642c58a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -31,13 +31,35 @@ /** * Test appender that can be used to verify that certain events were logged correctly */ -public class MockLogAppender { +public class MockLogAppender implements Releasable { private static final Map> mockAppenders = new ConcurrentHashMap<>(); private static final RealMockAppender parent = new RealMockAppender(); + // TODO: this can become final once the ctor is made private + private List loggers = List.of(); private final List expectations; private volatile boolean isAlive = true; + @Override + public void close() { + isAlive = false; + for (String logger : loggers) { + mockAppenders.compute(logger, (k, v) -> { + assert v != null; + v.remove(this); + return v.isEmpty() ? 
null : v; + }); + } + // check that all expectations have been evaluated before this is released + for (WrappedLoggingExpectation expectation : expectations) { + assertThat( + "Method assertMatched() not called on LoggingExpectation instance before release: " + expectation, + expectation.assertMatchedCalled, + is(true) + ); + } + } + private static class RealMockAppender extends AbstractAppender { RealMockAppender() { @@ -71,6 +93,11 @@ public MockLogAppender() { expectations = new CopyOnWriteArrayList<>(); } + private MockLogAppender(List loggers) { + this(); + this.loggers = loggers; + } + /** * Initialize the mock log appender with the log4j system. */ @@ -267,58 +294,57 @@ public String toString() { } } + public Releasable capturing(Class... classes) { + this.loggers = Arrays.stream(classes).map(Class::getCanonicalName).toList(); + addToMockAppenders(this, loggers); + return this; + } + + public Releasable capturing(String... names) { + this.loggers = Arrays.asList(names); + addToMockAppenders(this, loggers); + return this; + } + /** * Adds the list of class loggers to this {@link MockLogAppender}. * * Stops and runs some checks on the {@link MockLogAppender} once the returned object is released. */ - public Releasable capturing(Class... classes) { - return appendToLoggers(Arrays.stream(classes).map(Class::getCanonicalName).toList()); + public static MockLogAppender capture(Class... classes) { + return create(Arrays.stream(classes).map(Class::getCanonicalName).toList()); } /** * Same as above except takes string class names of each logger. */ - public Releasable capturing(String... names) { - return appendToLoggers(Arrays.asList(names)); + public static MockLogAppender capture(String... names) { + return create(Arrays.asList(names)); + } + + private static MockLogAppender create(List loggers) { + MockLogAppender appender = new MockLogAppender(loggers); + addToMockAppenders(appender, loggers); + return appender; } - private Releasable appendToLoggers(List loggers) { + private static void addToMockAppenders(MockLogAppender appender, List loggers) { for (String logger : loggers) { mockAppenders.compute(logger, (k, v) -> { if (v == null) { v = new CopyOnWriteArrayList<>(); } - v.add(this); + v.add(appender); return v; }); } - return () -> { - isAlive = false; - for (String logger : loggers) { - mockAppenders.compute(logger, (k, v) -> { - assert v != null; - v.remove(this); - return v.isEmpty() ? 
null : v; - }); - } - // check that all expectations have been evaluated before this is released - for (WrappedLoggingExpectation expectation : expectations) { - assertThat( - "Method assertMatched() not called on LoggingExpectation instance before release: " + expectation, - expectation.assertMatchedCalled, - is(true) - ); - } - }; } /** * Executes an action and verifies expectations against the provided logger */ public static void assertThatLogger(Runnable action, Class loggerOwner, MockLogAppender.LoggingExpectation expectation) { - MockLogAppender mockAppender = new MockLogAppender(); - try (var ignored = mockAppender.capturing(loggerOwner)) { + try (var mockAppender = MockLogAppender.capture(loggerOwner)) { mockAppender.addExpectation(expectation); action.run(); mockAppender.assertAllExpectationsMatched(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java index 40cdacb767d0f..e05c2dde930a9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java @@ -354,5 +354,10 @@ public T get(String path) { } return (T) context; } + + @Override + public String toString() { + return "JsonMapView{map=" + map + '}'; + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index ee7687398cf7b..89d10acb6ec45 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1319,8 +1319,7 @@ public void handleException(TransportException exp) {} .build() ); - MockLogAppender appender = new MockLogAppender(); - try (var ignored = appender.capturing("org.elasticsearch.transport.TransportService.tracer")) { + try (var appender = MockLogAppender.capture("org.elasticsearch.transport.TransportService.tracer")) { //////////////////////////////////////////////////////////////////////// // tests for included action type "internal:test" diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java index 49fb38b518dce..d555337f467ae 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/FeatureFlag.java @@ -16,7 +16,8 @@ */ public enum FeatureFlag { TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null), - FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null); + FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null), + SEMANTIC_TEXT_ENABLED("es.semantic_text_feature_flag_enabled=true", Version.fromString("8.15.0"), null); public final String systemProperty; public final Version from; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java index b8e4f77f7da7b..0cbe3786fc03c 100644 --- 
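Spelled out, the try-with-resources flow that the new static `capture` methods enable looks roughly like this; `SomeClass`, the expectation text, and `runCodeThatLogs()` are placeholders:

```java
import org.apache.logging.log4j.Level;

try (var appender = MockLogAppender.capture(SomeClass.class)) {
    appender.addExpectation(
        new MockLogAppender.SeenEventExpectation(
            "expected info message",            // name of this expectation
            SomeClass.class.getCanonicalName(), // logger to watch
            Level.INFO,
            "operation completed"               // message to match
        )
    );
    runCodeThatLogs();                          // the code under test
    appender.assertAllExpectationsMatched();
}   // close() also verifies each expectation was actually checked
```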
a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/HistogramFieldMapper.java @@ -551,6 +551,11 @@ public void write(XContentBuilder b) throws IOException { b.endObject(); } + + @Override + public String fieldName() { + return name(); + } }; } } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/DeleteAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/DeleteAutoscalingPolicyAction.java index d3be1816924fb..9b44daf6dd427 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/DeleteAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/DeleteAutoscalingPolicyAction.java @@ -34,6 +34,7 @@ public String name() { } public Request(final String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.name = Objects.requireNonNull(name); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java index 4a356f74e03f8..90c2d664b421d 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java @@ -39,6 +39,7 @@ public static class Request extends AcknowledgedRequest roles, final SortedMap deciders) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.name = name; this.roles = roles; this.deciders = deciders; diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 28983fe34df91..2f8cccdc303e6 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -816,7 +816,10 @@ private SingleForecast forecast(Metadata metadata, DataStream stream, long forec Map newIndices = new HashMap<>(); for (int i = 0; i < numberNewIndices; ++i) { final String uuid = UUIDs.randomBase64UUID(); - final Tuple rolledDataStreamInfo = stream.unsafeNextWriteIndexAndGeneration(state.metadata()); + final Tuple rolledDataStreamInfo = stream.unsafeNextWriteIndexAndGeneration( + state.metadata(), + stream.getBackingIndices() + ); stream = stream.unsafeRollover( new Index(rolledDataStreamInfo.v1(), uuid), rolledDataStreamInfo.v2(), diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 0a0cade089fab..a0917c1cef815 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -330,11 +330,12 @@ static DataStream updateLocalDataStream( // just copying the 
data stream is in this case safe. return remoteDataStream.copy() .setName(localDataStreamName) - .setIndices(List.of(backingIndexToFollow)) + .setBackingIndices( + // Replicated data streams can't be rolled over, so having the `rolloverOnWrite` flag set to `true` wouldn't make sense + // (and potentially even break things). + remoteDataStream.getBackingIndices().copy().setIndices(List.of(backingIndexToFollow)).setRolloverOnWrite(false).build() + ) .setReplicated(true) - // Replicated data streams can't be rolled over, so having the `rolloverOnWrite` flag set to `true` wouldn't make sense - // (and potentially even break things). - .setRolloverOnWrite(false) .build(); } else { if (localDataStream.isReplicated() == false) { @@ -376,7 +377,7 @@ static DataStream updateLocalDataStream( } return localDataStream.copy() - .setIndices(backingIndices) + .setBackingIndices(localDataStream.getBackingIndices().copy().setIndices(backingIndices).build()) .setGeneration(remoteDataStream.getGeneration()) .setMetadata(remoteDataStream.getMetadata()) .build(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequest.java index 5883c36c9e2c5..9e8e707db6b86 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequest.java @@ -14,7 +14,9 @@ public class GetBasicStatusRequest extends MasterNodeReadRequest { - public GetBasicStatusRequest() {} + public GetBasicStatusRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public GetBasicStatusRequest(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequest.java index 93a0206ac70c3..cae967058fb73 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequest.java @@ -14,7 +14,9 @@ public class GetTrialStatusRequest extends MasterNodeReadRequest { - public GetTrialStatusRequest() {} + public GetTrialStatusRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public GetTrialStatusRequest(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java index 602e521fe10e3..7e9b0ebf44bee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicRequest.java @@ -16,7 +16,9 @@ public class PostStartBasicRequest extends AcknowledgedRequest) () -> "unexpected failure during [" + TASK_SOURCE + "]", e); + var state = clusterService.lifecycleState(); + if (state == Lifecycle.State.STOPPED || state == Lifecycle.State.CLOSED) { + logger.debug("node shutdown during [" + TASK_SOURCE + "]", e); + } else { + logger.error("unexpected failure during [" + TASK_SOURCE + "]", e); + } } private ClusterState extendBasic(ClusterState currentState, LicensesMetadata currentLicenseMetadata) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java index 398b39b12aa19..e5fbc9e07955c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java @@ -18,7 +18,9 @@ public class XPackUsageRequest extends MasterNodeRequest { - public XPackUsageRequest() {} + public XPackUsageRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public XPackUsageRequest(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeRequest.java index f32fd515e7817..d1d04088dcdd7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/frozen/FreezeRequest.java @@ -27,6 +27,7 @@ public class FreezeRequest extends AcknowledgedRequest implements private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public FreezeRequest(String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java index e96c6a7632ec1..ea4e53aced5fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/GetLicenseRequest.java @@ -14,7 +14,9 @@ public class GetLicenseRequest extends MasterNodeReadRequest { - public GetLicenseRequest() {} + public GetLicenseRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public GetLicenseRequest(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersRequest.java index e6b087c97cdb5..6584dcc279e85 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersRequest.java @@ -53,6 +53,7 @@ public static MigrateToDataTiersRequest parse(XContentParser parser) throws IOEx } public MigrateToDataTiersRequest(@Nullable String legacyTemplateToDelete, @Nullable String nodeAttributeName) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.legacyTemplateToDelete = legacyTemplateToDelete; this.nodeAttributeName = nodeAttributeName; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/SetResetModeActionRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/SetResetModeActionRequest.java index 3d46b2dd5070f..6270c27ac463f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/SetResetModeActionRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/SetResetModeActionRequest.java @@ -44,6 +44,7 @@ public static SetResetModeActionRequest disabled(boolean deleteMetadata) { } SetResetModeActionRequest(boolean enabled, Boolean 
deleteMetadata) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.enabled = enabled; this.deleteMetadata = deleteMetadata != null && deleteMetadata; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ActivateAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ActivateAutoFollowPatternAction.java index 300d2844b7a2a..df917b4e97b7d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ActivateAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ActivateAutoFollowPatternAction.java @@ -34,6 +34,7 @@ public static class Request extends AcknowledgedRequest { private final boolean active; public Request(final String name, final boolean active) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.name = name; this.active = active; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java index b12f7bf2dc06a..b187e5e39dd33 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/CcrStatsAction.java @@ -45,7 +45,9 @@ public Request(StreamInput in) throws IOException { } } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } @Override public ActionRequestValidationException validate() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java index 8e7e9f8605245..e38a1cfd4a2cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/DeleteAutoFollowPatternAction.java @@ -32,6 +32,7 @@ public static class Request extends AcknowledgedRequest { private final String name; public Request(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.name = name; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java index c405e4e81ff19..d979a4cf44b9b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java @@ -41,7 +41,9 @@ public static class Request extends MasterNodeReadRequest { private String[] followerIndices; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public String[] getFollowerIndices() { return followerIndices; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java index 70f4f256c87e2..bd6ab5bb5af44 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java @@ -34,7 +34,9 @@ public static class Request extends MasterNodeReadRequest { private String name; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java index 7ad8e5881e443..c6905b2d06a34 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PauseFollowAction.java @@ -31,6 +31,7 @@ public static class Request extends MasterNodeRequest { private final String followIndex; public Request(String followIndex) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.followIndex = Objects.requireNonNull(followIndex, "followIndex"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index 92902aa9962ab..333171d864c4f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -85,7 +85,9 @@ public static Request fromXContent(XContentParser parser, String name) throws IO private FollowParameters parameters = new FollowParameters(); private List leaderIndexExclusionPatterns = Collections.emptyList(); - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } @Override public ActionRequestValidationException validate() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 6570fb66a2755..db1e84aca9cda 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -85,7 +85,9 @@ public static Request fromXContent(final XContentParser parser) throws IOExcepti private FollowParameters parameters = new FollowParameters(); private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public String getFollowerIndex() { return followerIndex; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java index 4cd84733b19e0..12ddea8d99578 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ResumeFollowAction.java @@ -54,7 +54,9 @@ public static Request fromXContent(final XContentParser parser, final String fol private String followerIndex; private FollowParameters parameters = new FollowParameters(); - public Request() {} + public Request() { + 
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public String getFollowerIndex() { return followerIndex; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java index 808df5f8bccb0..9a5f011f39a1b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/UnfollowAction.java @@ -34,6 +34,7 @@ public static class Request extends AcknowledgedRequest implements Indi private final String followerIndex; public Request(String followerIndex) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.followerIndex = followerIndex; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/DeleteEnrichPolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/DeleteEnrichPolicyAction.java index e444232291101..82f98176838ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/DeleteEnrichPolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/DeleteEnrichPolicyAction.java @@ -30,6 +30,7 @@ public static class Request extends MasterNodeRequest { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java index 779ea535f74d9..5d629365a8096 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyAction.java @@ -34,6 +34,7 @@ public static class Request extends MasterNodeRequest { private boolean waitForCompletion; public Request(String name) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.name = Objects.requireNonNull(name, "name cannot be null"); this.waitForCompletion = true; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java index ef8229b407b56..37851a3641ebd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java @@ -39,10 +39,12 @@ public static class Request extends MasterNodeReadRequest { private final List names; public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = new ArrayList<>(); } public Request(String[] names) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.names = Arrays.asList(names); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/PutEnrichPolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/PutEnrichPolicyAction.java index 4ebbb75239879..d1031828e0522 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/PutEnrichPolicyAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/PutEnrichPolicyAction.java @@ -37,6 +37,7 @@ public static class Request extends MasterNodeRequest { private String policyName; public Request(String policyName) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.policyName = policyName; } @@ -42,7 +43,9 @@ public Request(StreamInput in) throws IOException { policyName = in.readString(); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public String getPolicyName() { return policyName; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java index 41b29365b8866..d359498f33621 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java @@ -104,6 +104,7 @@ public static class Request extends AcknowledgedRequest { private final String[] policyNames; public Request(String... policyNames) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); if (policyNames == null) { throw new IllegalArgumentException("ids cannot be null"); } @@ -116,6 +117,7 @@ public Request(StreamInput in) throws IOException { } public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); policyNames = Strings.EMPTY_ARRAY; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequest.java index fe6754b735ef7..ebaaf42246251 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/PutLifecycleRequest.java @@ -38,6 +38,7 @@ public class PutLifecycleRequest extends AcknowledgedRequest { private final XContentType contentType; public Request(TaskType taskType, String inferenceEntityId, BytesReference content, XContentType contentType) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.taskType = taskType; this.inferenceEntityId = inferenceEntityId; this.content = content; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java index 631aa77a282ef..f82ee8b73c7a2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResults.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.InferenceResults; import org.elasticsearch.inference.InferenceServiceResults; import org.elasticsearch.inference.TaskType; @@ -27,6 +28,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.TransportVersions.ML_INFERENCE_RERANK_NEW_RESPONSE_FORMAT; +import static org.elasticsearch.TransportVersions.ML_RERANK_DOC_OPTIONAL; public 
class RankedDocsResults implements InferenceServiceResults { public static final String NAME = "rerank_service_results"; @@ -66,7 +68,11 @@ public static ConstructingObjectParser createParser(boo * @param relevanceScore * @param text */ - public record RankedDoc(int index, float relevanceScore, String text) implements Writeable, ToXContentObject { + public record RankedDoc(int index, float relevanceScore, @Nullable String text) + implements + Comparable, + Writeable, + ToXContentObject { public static ConstructingObjectParser createParser(boolean ignoreUnknownFields) { ConstructingObjectParser parser = new ConstructingObjectParser<>( @@ -77,7 +83,7 @@ public static ConstructingObjectParser createParser(boolean ign ); parser.declareInt(ConstructingObjectParser.constructorArg(), INDEX_FIELD); parser.declareFloat(ConstructingObjectParser.constructorArg(), RELEVANCE_SCORE_FIELD); - parser.declareString(ConstructingObjectParser.constructorArg(), TEXT_FIELD); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TEXT_FIELD); return parser; } @@ -95,7 +101,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(INDEX, index); builder.field(RELEVANCE_SCORE, relevanceScore); - builder.field(TEXT, text); + if (text != null) { + builder.field(TEXT, text); + } builder.endObject(); @@ -103,7 +111,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } public static RankedDoc of(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(ML_INFERENCE_RERANK_NEW_RESPONSE_FORMAT)) { + if (in.getTransportVersion().onOrAfter(ML_RERANK_DOC_OPTIONAL)) { + return new RankedDoc(in.readInt(), in.readFloat(), in.readOptionalString()); + } else if (in.getTransportVersion().onOrAfter(ML_INFERENCE_RERANK_NEW_RESPONSE_FORMAT)) { return new RankedDoc(in.readInt(), in.readFloat(), in.readString()); } else { return new RankedDoc(Integer.parseInt(in.readString()), Float.parseFloat(in.readString()), in.readString()); @@ -112,14 +122,18 @@ public static RankedDoc of(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(ML_INFERENCE_RERANK_NEW_RESPONSE_FORMAT)) { + if (out.getTransportVersion().onOrAfter(ML_RERANK_DOC_OPTIONAL)) { + out.writeInt(index); + out.writeFloat(relevanceScore); + out.writeOptionalString(text); + } else if (out.getTransportVersion().onOrAfter(ML_INFERENCE_RERANK_NEW_RESPONSE_FORMAT)) { out.writeInt(index); out.writeFloat(relevanceScore); - out.writeString(text); + out.writeString(text == null ? "" : text); } else { out.writeString(Integer.toString(index)); out.writeString(Float.toString(relevanceScore)); - out.writeString(text); + out.writeString(text == null ? 
"" : text); } } @@ -127,6 +141,11 @@ public Map asMap() { return Map.of(NAME, Map.of(INDEX, index, RELEVANCE_SCORE, relevanceScore, TEXT, text)); } + @Override + public int compareTo(RankedDoc other) { + return Float.compare(other.relevanceScore, this.relevanceScore); + } + public String toString() { return "RankedDoc{" + "index='" diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java index 23fed34d6889e..9b383b2652af4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java @@ -36,6 +36,7 @@ public static class Request extends MasterNodeRequest { private final StartTrainedModelDeploymentAction.TaskParams taskParams; public Request(StartTrainedModelDeploymentAction.TaskParams taskParams) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.taskParams = ExceptionsHelper.requireNonNull(taskParams, "taskParams"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java index 5c5e02559b1d5..40560f11b5039 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarAction.java @@ -36,6 +36,7 @@ public Request(StreamInput in) throws IOException { } public Request(String calendarId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java index 7d37dc8716387..efd35a3ba87f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteCalendarEventAction.java @@ -38,6 +38,7 @@ public Request(StreamInput in) throws IOException { } public Request(String calendarId, String eventId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName()); this.eventId = ExceptionsHelper.requireNonNull(eventId, ScheduledEvent.EVENT_ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java index 48323692b7915..82d6c36273539 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDataFrameAnalyticsAction.java @@ -48,6 +48,7 @@ public Request(StreamInput in) throws IOException { } public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); ackTimeout(DEFAULT_TIMEOUT); } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java index 2681fadf8fc59..f25be9cd164a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteDatafeedAction.java @@ -37,6 +37,7 @@ public static class Request extends AcknowledgedRequest implements ToXC private boolean force; public Request(String datafeedId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java index 50cec50b2e255..782c7fa4a4db1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteFilterAction.java @@ -38,6 +38,7 @@ public Request(StreamInput in) throws IOException { } public Request(String filterId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.filterId = ExceptionsHelper.requireNonNull(filterId, FILTER_ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java index f3e888ef9599c..5bf6a8e38e18d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteForecastAction.java @@ -40,6 +40,7 @@ public Request(StreamInput in) throws IOException { } public Request(String jobId, String forecastId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); this.forecastId = ExceptionsHelper.requireNonNull(forecastId, ForecastRequestStats.FORECAST_ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java index 58b67e57acf26..99b045d19bdd0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java @@ -44,6 +44,7 @@ public static class Request extends AcknowledgedRequest { private boolean deleteUserAnnotations; public Request(String jobId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java index 9cd19eab449a3..d76c4e2db064a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java @@ -48,6 +48,7 @@ public Request(StreamInput in) throws IOException { } public Request(String id) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.id = ExceptionsHelper.requireNonNull(id, TrainedModelConfig.MODEL_ID); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAliasAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAliasAction.java index 507060b1e51a4..27e895df5d415 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAliasAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAliasAction.java @@ -35,6 +35,7 @@ public static class Request extends AcknowledgedRequest { private final String modelId; public Request(String modelAlias, String modelId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.modelAlias = ExceptionsHelper.requireNonNull(modelAlias, MODEL_ALIAS); this.modelId = ExceptionsHelper.requireNonNull(modelId, TrainedModelConfig.MODEL_ID); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAssignmentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAssignmentAction.java index 04f1b3ddb2e26..9254d9ecc1425 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAssignmentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAssignmentAction.java @@ -30,6 +30,7 @@ public static class Request extends MasterNodeRequest { private final String modelId; public Request(String modelId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.modelId = ExceptionsHelper.requireNonNull(modelId, "model_id"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java index 64b042b61c2b6..305ed8c4fc607 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java @@ -60,6 +60,7 @@ public Request(StreamInput in) throws IOException { } public Request(DataFrameAnalyticsConfig config) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.config = config; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java index b270c4506ba4a..8fb1f3a91ab8b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FinalizeJobExecutionAction.java @@ -29,6 +29,7 @@ public static class Request extends MasterNodeRequest { private String[] jobIds; public Request(String[] jobIds) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.jobIds = jobIds; } diff --git 
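Every constructor change in this area follows the same pattern: master-node requests now pass an explicit timeout up to the superclass instead of inheriting one implicitly. A schematic example; the request class itself is hypothetical, while the constants are the ones used throughout this diff:

```java
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;

// Hypothetical request type. Spelling out the legacy default at each
// construction site makes the remaining call sites easy to find and
// migrate to deliberate timeouts later.
public class ExampleRequest extends AcknowledgedRequest<ExampleRequest> {

    private final String name;

    public ExampleRequest(String name) {
        super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
        this.name = name;
    }

    public String name() {
        return name;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }
}
```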
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushTrainedModelCacheAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushTrainedModelCacheAction.java index bdba626676b2d..c24fc159769e1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushTrainedModelCacheAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushTrainedModelCacheAction.java @@ -27,11 +27,11 @@ private FlushTrainedModelCacheAction() { public static class Request extends AcknowledgedRequest { public Request() { - super(); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); } Request(TimeValue timeout) { - super(timeout); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); } public Request(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java index 1bd266c68a65a..e509b84b06ae1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsAction.java @@ -50,6 +50,7 @@ public Request(String datafeedId) { } public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); local(true); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java index 1a63eda0d687d..fafb9afa99f85 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java @@ -70,6 +70,7 @@ public static class Request extends MasterNodeReadRequest { private boolean allowNoMatch = true; public Request(String datafeedId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.datafeedId = ExceptionsHelper.requireNonNull(datafeedId, DatafeedConfig.ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobModelSnapshotsUpgradeStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobModelSnapshotsUpgradeStatsAction.java index e5542593df4e4..ec49603c89cb8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobModelSnapshotsUpgradeStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobModelSnapshotsUpgradeStatsAction.java @@ -61,6 +61,7 @@ public static class Request extends MasterNodeReadRequest { + public static class Request extends MasterNodeRequest { - public Request(TimeValue timeout) { - super(timeout); + private final TimeValue requestTimeout; + + public Request(TimeValue masterNodeTimeout, TimeValue requestTimeout) { + super(masterNodeTimeout); + this.requestTimeout = Objects.requireNonNull(requestTimeout); } public Request(StreamInput in) throws IOException { super(in); + this.requestTimeout = in.readTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeTimeValue(this.requestTimeout); + } + + public TimeValue requestTimeout() { + return requestTimeout; } @Override @@ -50,9 +65,14 @@ public Task 
createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, "get_ml_autoscaling_resources", parentTaskId, headers); } + @Override + public ActionRequestValidationException validate() { + return null; + } + @Override public int hashCode() { - return Objects.hash(ackTimeout()); + return Objects.hash(requestTimeout); } @Override @@ -64,7 +84,7 @@ public boolean equals(Object obj) { return false; } GetMlAutoscalingStats.Request other = (GetMlAutoscalingStats.Request) obj; - return Objects.equals(ackTimeout(), other.ackTimeout()); + return Objects.equals(requestTimeout, other.requestTimeout); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java index e8b345b3c3ff6..4664dbe8f7bc0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java @@ -68,6 +68,7 @@ public static class Request extends AcknowledgedRequest { private final String nodeId; public Request(String nodeId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.nodeId = ExceptionsHelper.requireNonNull(nodeId, "nodeId"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index b6f852605db9f..cf17a828930c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -55,10 +55,12 @@ public static Request parseRequest(String jobId, XContentParser parser) { private JobParams jobParams; public Request(JobParams jobParams) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.jobParams = Objects.requireNonNull(jobParams); } public Request(String jobId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.jobParams = new JobParams(jobId); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java index fe26cdb0377fd..82db002e42043 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDataFrameAnalyticsAction.java @@ -62,6 +62,7 @@ public Request(StreamInput in) throws IOException { } public Request(DataFrameAnalyticsConfig config) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.config = config; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java index 12e9b4f2967d0..f79d2af49f536 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutDatafeedAction.java @@ -43,6 +43,7 @@ public static Request parseRequest(String datafeedId, IndicesOptions indicesOpti private final DatafeedConfig datafeed; public Request(DatafeedConfig datafeed) { + 
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.datafeed = datafeed; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java index 9d8fca699df2d..60d7f0008c0de 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutJobAction.java @@ -51,6 +51,7 @@ public static Request parseRequest(String jobId, XContentParser parser, IndicesO public Request(Job.Builder jobBuilder) { // Validate the jobBuilder immediately so that errors can be detected prior to transportation. + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); jobBuilder.validateInputFields(); // Validate that detector configs are unique. // This validation logically belongs to validateInputFields call but we perform it only for PUT action to avoid BWC issues which diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java index 2e5a475369510..25d32d19aef8d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java @@ -75,6 +75,7 @@ public Request(TrainedModelConfig config, boolean deferDefinitionDecompression) } public Request(TrainedModelConfig config, boolean deferDefinitionDecompression, boolean waitForCompletion) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.config = config; this.deferDefinitionDecompression = deferDefinitionDecompression; this.waitForCompletion = waitForCompletion; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAliasAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAliasAction.java index 9f0b5880f5c51..3ba91390f10d1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAliasAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAliasAction.java @@ -48,6 +48,7 @@ public static class Request extends AcknowledgedRequest { private final boolean reassign; public Request(String modelAlias, String modelId, boolean reassign) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.modelAlias = ExceptionsHelper.requireNonNull(modelAlias, MODEL_ALIAS); this.modelId = ExceptionsHelper.requireNonNull(modelId, TrainedModelConfig.MODEL_ID); this.reassign = reassign; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java index b7fcb98426cc0..a588f74426993 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java @@ -76,6 +76,7 @@ public Request( int totalParts, boolean allowOverwriting ) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.modelId = 
ExceptionsHelper.requireNonNull(modelId, TrainedModelConfig.MODEL_ID); this.definition = ExceptionsHelper.requireNonNull(definition, DEFINITION); this.part = part; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java index 1abae7be95011..106f37a378897 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java @@ -70,6 +70,7 @@ public Request( @Nullable List scores, boolean allowOverwriting ) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.modelId = ExceptionsHelper.requireNonNull(modelId, TrainedModelConfig.MODEL_ID); this.vocabulary = ExceptionsHelper.requireNonNull(vocabulary, VOCABULARY); this.merges = Optional.ofNullable(merges).orElse(List.of()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java index bc74f16eea0e5..548fd80da73de 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java @@ -57,6 +57,7 @@ public static class Request extends AcknowledgedRequest { private boolean deleteUserAnnotations; public Request(String jobId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java index eb975133e71eb..0dd6fd8b59669 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/RevertModelSnapshotAction.java @@ -63,7 +63,9 @@ public static Request parseRequest(String jobId, String snapshotId, XContentPars private boolean deleteInterveningResults; private boolean force; - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); @@ -74,6 +76,7 @@ public Request(StreamInput in) throws IOException { } public Request(String jobId, String snapshotId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, SNAPSHOT_ID.getPreferredName()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java index 9a1574bd2b036..821caf001f3e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java @@ -43,6 +43,7 @@ public static class Request extends AcknowledgedRequest implements ToXC } public Request(boolean 
enabled) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.enabled = enabled; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java index 67abda2b3eb64..00e6a546be5a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java @@ -72,6 +72,7 @@ public static Request parseRequest(String id, XContentParser parser) { private TimeValue timeout = DEFAULT_TIMEOUT; public Request(String id) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); setId(id); } @@ -81,7 +82,9 @@ public Request(StreamInput in) throws IOException { timeout = in.readTimeValue(); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public final void setId(String id) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameAnalyticsConfig.ID); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java index 18763a78fa456..deeed6df87064 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -66,14 +66,17 @@ public static Request parseRequest(String datafeedId, XContentParser parser) { private DatafeedParams params; public Request(String datafeedId, long startTime) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.params = new DatafeedParams(datafeedId, startTime); } public Request(String datafeedId, String startTime) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.params = new DatafeedParams(datafeedId, startTime); } public Request(DatafeedParams params) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.params = params; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index 8d9da97538e11..b3cf9f16c3c82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -140,9 +140,12 @@ public static Request parseRequest(String modelId, String deploymentId, XContent private int queueCapacity = 1024; private Priority priority = Priority.NORMAL; - private Request() {} + private Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public Request(String modelId, String deploymentId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); setModelId(modelId); setDeploymentId(deploymentId); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDataFrameAnalyticsAction.java index d23f222b9687b..513a4d7b2ea8e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDataFrameAnalyticsAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDataFrameAnalyticsAction.java @@ -57,6 +57,7 @@ public Request(StreamInput in) throws IOException { } public Request(DataFrameAnalyticsConfigUpdate update) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.update = update; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java index 694ca39d9cd49..0757f1f1dc7e7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateDatafeedAction.java @@ -43,6 +43,7 @@ public static Request parseRequest(String datafeedId, @Nullable IndicesOptions i private DatafeedUpdate update; public Request(DatafeedUpdate update) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.update = update; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index 15cd272d12b8b..33856bfcefbb7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -46,6 +46,7 @@ public Request(String jobId, JobUpdate update) { } private Request(String jobId, JobUpdate update, boolean isInternal) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.jobId = jobId; this.update = update; this.isInternal = isInternal; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelAssignmentRoutingInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelAssignmentRoutingInfoAction.java index 5cd55a201c45d..fd1b179da8919 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelAssignmentRoutingInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelAssignmentRoutingInfoAction.java @@ -33,6 +33,7 @@ public static class Request extends MasterNodeRequest { private final RoutingInfoUpdate update; public Request(String nodeId, String deploymentId, RoutingInfoUpdate update) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.nodeId = ExceptionsHelper.requireNonNull(nodeId, "node_id"); this.deploymentId = ExceptionsHelper.requireNonNull(deploymentId, "deployment_id"); this.update = ExceptionsHelper.requireNonNull(update, "update"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java index bb113a9b3e1e8..62a7d84c60a62 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java @@ -64,9 +64,12 @@ public static Request parseRequest(String deploymentId, XContentParser parser) { private String deploymentId; private int numberOfAllocations; - private Request() {} + private Request() { + 
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public Request(String deploymentId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); setDeploymentId(deploymentId); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpgradeJobModelSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpgradeJobModelSnapshotAction.java index 7fbcffa476159..abe481c926fdb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpgradeJobModelSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpgradeJobModelSnapshotAction.java @@ -71,6 +71,7 @@ public static UpgradeJobModelSnapshotAction.Request parseRequest(XContentParser } public Request(String jobId, String snapshotId, TimeValue timeValue, boolean waitForCompletion) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID); this.snapshotId = ExceptionsHelper.requireNonNull(snapshotId, SNAPSHOT_ID); this.timeout = timeValue == null ? DEFAULT_TIMEOUT : timeValue; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java index b8b75e2bf7eb4..412ccfa7b24a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextSimilarityInferenceResults.java @@ -58,6 +58,10 @@ public String getResultsField() { return resultsField; } + public double score() { + return score; + } + @Override public Double predictedValue() { return score; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java index 2ddbf8bd63f49..4e914cba1ff0d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java @@ -69,6 +69,13 @@ public static TextSimilarityConfigUpdate fromMap(Map map) { private final String resultsField; private final TextSimilarityConfig.SpanScoreFunction spanScoreFunction; + public TextSimilarityConfigUpdate(String text) { + super((TokenizationUpdate) null); + this.text = ExceptionsHelper.requireNonNull(text, TEXT); + this.resultsField = null; + this.spanScoreFunction = null; + } + public TextSimilarityConfigUpdate( String text, @Nullable String resultsField, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/packageloader/action/GetTrainedModelPackageConfigAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/packageloader/action/GetTrainedModelPackageConfigAction.java index 8fcc977e3faeb..ea67dfdfb1857 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/packageloader/action/GetTrainedModelPackageConfigAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/packageloader/action/GetTrainedModelPackageConfigAction.java @@ -37,10 +37,12 @@ public static class Request extends 
MasterNodeRequest { - public MonitoringMigrateAlertsRequest() {} + public MonitoringMigrateAlertsRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } public MonitoringMigrateAlertsRequest(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java index 06a6b4c2a072c..7f1e81164a513 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/PutRollupJobAction.java @@ -38,6 +38,7 @@ public static class Request extends AcknowledgedRequest implements Indi private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); public Request(RollupJobConfig config) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.config = config; } @@ -48,6 +49,7 @@ public Request(StreamInput in) throws IOException { public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); } public static Request fromXContent(final XContentParser parser, final String id) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java index 3cb7b5b07fc1b..fba742e288032 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java @@ -101,6 +101,7 @@ public MountSearchableSnapshotRequest( boolean waitForCompletion, Storage storage ) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.mountedIndexName = Objects.requireNonNull(mountedIndexName); this.repositoryName = Objects.requireNonNull(repositoryName); this.snapshotName = Objects.requireNonNull(snapshotName); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java index 039ed8aa5fb64..f85ca260c3fff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -166,16 +166,4 @@ public void writeTo(StreamOutput out) throws IOException { public ExpressionRoleMapping getMapping() { return new ExpressionRoleMapping(name, rules, roles, roleTemplates, metadata, enabled); } - - public static PutRoleMappingRequest fromMapping(ExpressionRoleMapping mapping) { - var request = new PutRoleMappingRequest(); - request.setName(mapping.getName()); - request.setEnabled(mapping.isEnabled()); - request.setRoles(mapping.getRoles()); - request.setRoleTemplates(mapping.getRoleTemplates()); - request.setRules(mapping.getExpression()); - request.setMetadata(mapping.getMetadata()); - - return request; - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java index 88a930063190b..d46c21f080308 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java @@ -9,8 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; @@ -35,8 +34,8 @@ public PutRoleMappingRequestBuilder(ElasticsearchClient client) { /** * Populate the put role request from the source and the role's name */ - public PutRoleMappingRequestBuilder source(String name, BytesReference source, XContentType xContentType) throws IOException { - ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, source, xContentType); + public PutRoleMappingRequestBuilder source(String name, XContentParser parser) throws IOException { + ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, parser); request.setName(name); request.setEnabled(mapping.isEnabled()); request.setRoles(mapping.getRoles()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/GetSecuritySettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/GetSecuritySettingsAction.java index bc8d81cd268ad..7623a7f65af34 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/GetSecuritySettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/GetSecuritySettingsAction.java @@ -34,9 +34,13 @@ public GetSecuritySettingsAction() { public static class Request extends MasterNodeReadRequest { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } - public Request(StreamInput in) throws IOException {} + public Request(StreamInput in) throws IOException { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } @Override public void writeTo(StreamOutput out) throws IOException {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java index 20feb0faf5033..3cce133749e44 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/UpdateSecuritySettingsAction.java @@ -72,12 +72,14 @@ public Request( Map tokensIndexSettings, Map profilesIndexSettings ) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.mainIndexSettings = Objects.requireNonNullElse(mainIndexSettings, Collections.emptyMap()); this.tokensIndexSettings = 
Objects.requireNonNullElse(tokensIndexSettings, Collections.emptyMap()); this.profilesIndexSettings = Objects.requireNonNullElse(profilesIndexSettings, Collections.emptyMap()); } public Request(StreamInput in) throws IOException { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.mainIndexSettings = in.readGenericMap(); this.tokensIndexSettings = in.readGenericMap(); this.profilesIndexSettings = in.readGenericMap(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 49be4c5d466b2..eb4b7efdb88b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -49,7 +49,11 @@ static RoleDescriptor kibanaAdminUser(String name, Map metadata) null, null, metadata, - null + null, + null, + null, + null, + "Grants access to all features in Kibana." ); } @@ -408,7 +412,13 @@ static RoleDescriptor kibanaSystem(String name) { getRemoteIndicesReadPrivileges("traces-apm-*") }, null, null, - null + "Grants access necessary for the Kibana system user to read from and write to the Kibana indices, " + + "manage index templates and tokens, and check the availability of the Elasticsearch cluster. " + + "It also permits activating, searching, and retrieving user profiles, " + + "as well as updating user profile data for the kibana-* namespace. " + + "Additionally, this role grants read access to the .monitoring-* indices " + + "and read and write access to the .reporting-* indices. " + + "Note: This role should not be assigned to users as the granted permissions may change between releases." ); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index dd8f34a60fa1f..2e7a5271103f4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -103,7 +103,11 @@ public class ReservedRolesStore implements BiConsumer, ActionListene ) ), null, - null + "Grants full access to cluster management and data indices. " + + "This role also grants direct read-only access to restricted indices like .security. " + + "A user with this role can impersonate any other user in the system, " + + "manage security and create roles with unlimited privileges. " + + "Take extra care when assigning it to a user." ); private static final Map ALL_RESERVED_ROLES = initializeReservedRoles(); @@ -203,7 +207,12 @@ private static Map initializeReservedRoles() { getRemoteIndicesReadPrivileges("metricbeat-*") }, null, null, - null + "Grants the minimum privileges required for any user of X-Pack monitoring other than those required to use Kibana. " + + "This role grants access to the monitoring indices and grants privileges necessary " + + "for reading basic cluster information. " + + "This role also includes all Kibana privileges for the Elastic Stack monitoring features. 
" + + "Monitoring users should also be assigned the kibana_admin role, " + + "or another role with access to the Kibana instance." ) ), entry( @@ -232,7 +241,16 @@ private static Map initializeReservedRoles() { ) .build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants the minimum privileges required to write data into the monitoring indices (.monitoring-*). " + + "This role also has the privileges necessary to create Metricbeat indices (metricbeat-*) " + + "and write data into them." ) ), entry( @@ -251,7 +269,11 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants the minimum privileges required to collect monitoring data for the Elastic Stack." ) ), entry( @@ -261,7 +283,14 @@ private static Map initializeReservedRoles() { new String[] { "manage_index_templates", "manage_pipeline" }, null, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants access to manage all index templates and all ingest pipeline configurations." ) ), // reporting_user doesn't have any privileges in Elasticsearch, and Kibana authorizes privileges based on this role @@ -275,7 +304,14 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.getDeprecatedReservedMetadata("Please use Kibana feature privileges instead"), - null + null, + null, + null, + null, + "Grants the specific privileges required for users of X-Pack reporting other than those required to use Kibana. " + + "This role grants access to the reporting indices; each user has access to only their own reports. " + + "Reporting users should also be assigned additional roles that grant access to Kibana as well as read access " + + "to the indices that will be used to generate reports." ) ), entry(KibanaSystemUser.ROLE_NAME, kibanaSystemRoleDescriptor(KibanaSystemUser.ROLE_NAME)), @@ -286,7 +322,15 @@ private static Map initializeReservedRoles() { new String[] { "monitor", MonitoringBulkAction.NAME }, null, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants access necessary for the Logstash system user to send system-level data (such as monitoring) to Elasticsearch. " + + "This role should not be assigned to users as the granted permissions may change between releases." ) ), entry( @@ -297,7 +341,14 @@ private static Map initializeReservedRoles() { new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(".management-beats").privileges("all").build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants access to the .management-beats index, which contains configuration information for the Beats." ) ), entry( @@ -311,7 +362,15 @@ private static Map initializeReservedRoles() { .privileges("create_index", "create") .build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants access necessary for the Beats system user to send system-level data (such as monitoring) to Elasticsearch. " + + "This role should not be assigned to users as the granted permissions may change between releases." 
) ), entry( @@ -325,7 +384,14 @@ private static Map initializeReservedRoles() { .privileges("create_index", "create_doc") .build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants access necessary for the APM system user to send system-level data (such as monitoring) to Elasticsearch.\n" ) ), entry( @@ -381,7 +447,12 @@ private static Map initializeReservedRoles() { MetadataUtils.getDeprecatedReservedMetadata( "This role will be removed in a future major release. Please use editor and viewer roles instead" ), - null + null, + null, + null, + null, + "Grants the privileges required for APM users (such as read and view_index_metadata privileges " + + "on the apm-* and .ml-anomalies* indices)." ) ), entry( @@ -394,7 +465,11 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants access necessary to manage inference models and performing inference." ) ), entry( @@ -407,7 +482,11 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants access necessary to perform inference." ) ), entry( @@ -440,7 +519,15 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants the minimum privileges required to view machine learning configuration, status, and work with results. " + + "This role grants monitor_ml cluster privileges, read access to the .ml-notifications and .ml-anomalies* indices " + + "(which store machine learning results), and write access to .ml-annotations* indices. " + + "Machine learning users also need index privileges for source and destination indices " + + "and roles that grant access to Kibana. " ) ), entry( @@ -474,7 +561,15 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Provides all of the privileges of the machine_learning_user role plus the full use of the machine learning APIs. " + + "Grants manage_ml cluster privileges, read access to .ml-anomalies*, .ml-notifications*, .ml-state*, " + + ".ml-meta* indices and write access to .ml-annotations* indices. " + + "Machine learning administrators also need index privileges for source and destination indices " + + "and roles that grant access to Kibana." ) ), // DEPRECATED: to be removed in 9.0.0 @@ -501,7 +596,12 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.getDeprecatedReservedMetadata("Please use the [transform_admin] role instead"), - null + null, + null, + null, + null, + "Grants manage_data_frame_transforms cluster privileges, which enable you to manage transforms. " + + "This role also includes all Kibana privileges for the machine learning features." ) ), // DEPRECATED: to be removed in 9.0.0 @@ -528,7 +628,12 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.getDeprecatedReservedMetadata("Please use the [transform_user] role instead"), - null + null, + null, + null, + null, + "Grants monitor_data_frame_transforms cluster privileges, which enable you to use transforms. " + + "This role also includes all Kibana privileges for the machine learning features. 
" ) ), entry( @@ -549,7 +654,12 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants manage_transform cluster privileges, which enable you to manage transforms. " + + "This role also includes all Kibana privileges for the machine learning features." ) ), entry( @@ -570,7 +680,12 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants monitor_transform cluster privileges, which enable you to perform read-only operations related to " + + "transforms. This role also includes all Kibana privileges for the machine learning features." ) ), entry( @@ -585,7 +700,16 @@ private static Map initializeReservedRoles() { .allowRestrictedIndices(true) .build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Allows users to create and execute all Watcher actions. " + + "Grants read access to the .watches index. Also grants read access " + + "to the watch history and the triggered watches index." ) ), entry( @@ -604,7 +728,14 @@ private static Map initializeReservedRoles() { .privileges("read") .build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants read access to the .watches index, the get watch action and the watcher stats." ) ), entry( @@ -619,16 +750,50 @@ private static Map initializeReservedRoles() { .allowRestrictedIndices(true) .build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants access to the .logstash* indices for managing configurations, " + + "and grants necessary access for logstash-specific APIs exposed by the logstash x-pack plugin." ) ), entry( "rollup_user", - new RoleDescriptor("rollup_user", new String[] { "monitor_rollup" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA) + new RoleDescriptor( + "rollup_user", + new String[] { "monitor_rollup" }, + null, + null, + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants monitor_rollup cluster privileges, which enable you to perform read-only operations related to rollups." + ) ), entry( "rollup_admin", - new RoleDescriptor("rollup_admin", new String[] { "manage_rollup" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA) + new RoleDescriptor( + "rollup_admin", + new String[] { "manage_rollup" }, + null, + null, + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants manage_rollup cluster privileges, which enable you to manage and execute all rollup actions." + ) ), entry( "snapshot_user", @@ -645,7 +810,14 @@ private static Map initializeReservedRoles() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants the necessary privileges to create snapshots of all the indices and to view their metadata. " + + "This role enables users to view the configuration of existing snapshot repositories and snapshot details. " + + "It does not grant authority to remove or add repositories or to restore snapshots. " + + "It also does not enable to change index settings or to read or update data stream or index data." 
) ), entry( @@ -661,7 +833,14 @@ private static Map initializeReservedRoles() { .build(), RoleDescriptor.IndicesPrivileges.builder().indices(".enrich-*").privileges("manage", "write").build() }, null, - MetadataUtils.DEFAULT_RESERVED_METADATA + null, + null, + MetadataUtils.DEFAULT_RESERVED_METADATA, + null, + null, + null, + null, + "Grants access to manage all enrich indices (.enrich-*) and all operations on ingest pipelines." ) ), entry("viewer", buildViewerRoleDescriptor()), @@ -703,7 +882,11 @@ private static RoleDescriptor buildViewerRoleDescriptor() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants read-only access to all features in Kibana (including Solutions) and to data indices." ); } @@ -750,7 +933,11 @@ private static RoleDescriptor buildEditorRoleDescriptor() { null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, - null + null, + null, + null, + null, + "Grants full access to all features in Kibana (including Solutions) and read-only access to data indices." ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/DeleteSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/DeleteSnapshotLifecycleAction.java index 17a23f6b66b5b..6e083295b0863 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/DeleteSnapshotLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/DeleteSnapshotLifecycleAction.java @@ -33,9 +33,12 @@ public Request(StreamInput in) throws IOException { lifecycleId = in.readString(); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public Request(String lifecycleId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.lifecycleId = Objects.requireNonNull(lifecycleId, "id may not be null"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotLifecycleAction.java index 8a8ecf3a747a8..442ff6b2bfb66 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotLifecycleAction.java @@ -36,6 +36,7 @@ public static class Request extends AcknowledgedRequest implements ToXC private String lifecycleId; public Request(String lifecycleId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.lifecycleId = lifecycleId; } @@ -44,7 +45,9 @@ public Request(StreamInput in) throws IOException { lifecycleId = in.readString(); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public String getLifecycleId() { return this.lifecycleId; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotRetentionAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotRetentionAction.java index 9574ba7fff685..e4d698f48d252 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotRetentionAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/ExecuteSnapshotRetentionAction.java @@ -26,7 +26,9 @@ protected 
ExecuteSnapshotRetentionAction() { public static class Request extends AcknowledgedRequest implements ToXContentObject { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public Request(StreamInput in) throws IOException { super(in); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java index d556c0fda5e7f..ad62b155da41c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java @@ -35,6 +35,7 @@ public static class Request extends AcknowledgedRequest implements ToXC private SnapshotLifecyclePolicy lifecycle; public Request(String lifecycleId, SnapshotLifecyclePolicy lifecycle) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.lifecycleId = lifecycleId; this.lifecycle = lifecycle; } @@ -46,7 +47,9 @@ public Request(StreamInput in) throws IOException { lifecycle = new SnapshotLifecyclePolicy(in); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public String getLifecycleId() { return this.lifecycleId; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java index d6deb7bda384f..666701ac1f885 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StartSLMAction.java @@ -28,7 +28,9 @@ public Request(StreamInput in) throws IOException { super(in); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } @Override public int hashCode() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java index 60be1b99cde8d..4aae048b5e5b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/StopSLMAction.java @@ -28,7 +28,9 @@ public Request(StreamInput in) throws IOException { super(in); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } @Override public int hashCode() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java index 3623c659216d2..79ae38745934d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java @@ -34,7 +34,7 @@ public static class Request extends AcknowledgedRequest { private final boolean deleteDestIndex; public Request(String id, boolean force, boolean deleteDestIndex, TimeValue timeout) { - super(timeout); + 
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); this.id = ExceptionsHelper.requireNonNull(id, TransformField.ID.getPreferredName()); this.force = force; this.deleteDestIndex = deleteDestIndex; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java index f06ba16d9da78..adebbba651f16 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java @@ -58,7 +58,7 @@ public static class Request extends AcknowledgedRequest implements ToXC private final TransformConfig config; public Request(TransformConfig config, TimeValue timeout) { - super(timeout); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); this.config = config; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java index 9d335b2ccdb34..496e826651572 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PutTransformAction.java @@ -57,7 +57,7 @@ public static class Request extends AcknowledgedRequest { private final boolean deferValidation; public Request(TransformConfig config, boolean deferValidation, TimeValue timeout) { - super(timeout); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); this.config = config; this.deferValidation = deferValidation; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ResetTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ResetTransformAction.java index 609dd33cbfa9e..5840e107c1d17 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ResetTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ResetTransformAction.java @@ -34,7 +34,7 @@ public static class Request extends AcknowledgedRequest { private final boolean force; public Request(String id, boolean force, TimeValue timeout) { - super(timeout); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); this.id = ExceptionsHelper.requireNonNull(id, TransformField.ID.getPreferredName()); this.force = force; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java index 3ecadd1b708cc..838a0650c8afa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java @@ -39,7 +39,7 @@ public static class Request extends AcknowledgedRequest { private final Instant from; public Request(String id, Instant from, TimeValue timeout) { - super(timeout); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); this.id = ExceptionsHelper.requireNonNull(id, TransformField.ID.getPreferredName()); this.from = from; } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpgradeTransformsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpgradeTransformsAction.java index 3a36d9163e0c0..cdc0a53b6f0a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpgradeTransformsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpgradeTransformsAction.java @@ -40,7 +40,7 @@ public Request(StreamInput in) throws IOException { } public Request(boolean dryRun, TimeValue timeout) { - super(timeout); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); this.dryRun = dryRun; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java index de6435ad31dbc..55c21b91b11d8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java @@ -36,7 +36,7 @@ public static class Request extends AcknowledgedRequest { private final boolean deferValidation; public Request(TransformConfig config, boolean deferValidation, TimeValue timeout) { - super(timeout); + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, timeout); this.config = config; this.deferValidation = deferValidation; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/GetWatcherSettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/GetWatcherSettingsAction.java index 576bd220853ce..902c6db07dc89 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/GetWatcherSettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/GetWatcherSettingsAction.java @@ -30,9 +30,13 @@ public GetWatcherSettingsAction() { public static class Request extends MasterNodeReadRequest { - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } - public Request(StreamInput in) throws IOException {} + public Request(StreamInput in) throws IOException { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } @Override public void writeTo(StreamOutput out) throws IOException {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java index 29f4db51e146e..b6d999ebbf380 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java @@ -39,10 +39,12 @@ public static class Request extends AcknowledgedRequest { private final Map settings; public Request(Map settings) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.settings = settings; } public Request(StreamInput in) throws IOException { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.settings = in.readGenericMap(); } diff 
--git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java index 93cc7a18594d6..449179e4f18f6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/service/WatcherServiceRequest.java @@ -29,7 +29,9 @@ public WatcherServiceRequest(StreamInput in) throws IOException { command = Command.valueOf(in.readString().toUpperCase(Locale.ROOT)); } - public WatcherServiceRequest() {} + public WatcherServiceRequest() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + } /** * Starts the watcher service if not already started. diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java index 3be073b439828..603531f0aedf9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java @@ -16,6 +16,8 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.TransportVersions.ML_RERANK_DOC_OPTIONAL; + public class RankedDocsResultsTests extends AbstractBWCSerializationTestCase { @Override @@ -33,7 +35,7 @@ public static RankedDocsResults createRandom() { } public static RankedDocsResults.RankedDoc createRandomDoc() { - return new RankedDocsResults.RankedDoc(randomIntBetween(0, 100), randomFloat(), randomAlphaOfLength(10)); + return new RankedDocsResults.RankedDoc(randomIntBetween(0, 100), randomFloat(), randomBoolean() ? 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java
index 3be073b439828..603531f0aedf9 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/results/RankedDocsResultsTests.java
@@ -16,6 +16,8 @@
 import java.util.ArrayList;
 import java.util.List;

+import static org.elasticsearch.TransportVersions.ML_RERANK_DOC_OPTIONAL;
+
 public class RankedDocsResultsTests extends AbstractBWCSerializationTestCase<RankedDocsResults> {

     @Override
@@ -33,7 +35,7 @@ public static RankedDocsResults createRandom() {
     }

     public static RankedDocsResults.RankedDoc createRandomDoc() {
-        return new RankedDocsResults.RankedDoc(randomIntBetween(0, 100), randomFloat(), randomAlphaOfLength(10));
+        return new RankedDocsResults.RankedDoc(randomIntBetween(0, 100), randomFloat(), randomBoolean() ? null : randomAlphaOfLength(10));
     }

     @Override
@@ -45,7 +47,24 @@ protected RankedDocsResults mutateInstance(RankedDocsResults instance) throws IOException {

     @Override
     protected RankedDocsResults mutateInstanceForVersion(RankedDocsResults instance, TransportVersion fromVersion) {
-        return instance;
+        if (fromVersion.onOrAfter(ML_RERANK_DOC_OPTIONAL)) {
+            return instance;
+        } else {
+            var compatibleDocs = rankedDocsNullStringToEmpty(instance.getRankedDocs());
+            return new RankedDocsResults(compatibleDocs);
+        }
+    }
+
+    private List<RankedDocsResults.RankedDoc> rankedDocsNullStringToEmpty(List<RankedDocsResults.RankedDoc> rankedDocs) {
+        var result = new ArrayList<RankedDocsResults.RankedDoc>(rankedDocs.size());
+        for (var doc : rankedDocs) {
+            if (doc.text() == null) {
+                result.add(new RankedDocsResults.RankedDoc(doc.index(), doc.relevanceScore(), ""));
+            } else {
+                result.add(doc);
+            }
+        }
+        return result;
     }

     @Override
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetMlAutoscalingStatsRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetMlAutoscalingStatsRequestTests.java
index ee265538829d3..eb0b8420625ac 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetMlAutoscalingStatsRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetMlAutoscalingStatsRequestTests.java
@@ -23,11 +23,14 @@ protected Writeable.Reader<Request> instanceReader() {

     @Override
     protected Request createTestInstance() {
-        return new Request(randomTimeValue(0, 10_000));
+        return new Request(TimeValue.THIRTY_SECONDS, randomTimeValue(0, 10_000));
     }

     @Override
     protected Request mutateInstance(Request instance) throws IOException {
-        return new Request(TimeValue.timeValueMillis(instance.ackTimeout().millis() + randomIntBetween(1, 1000)));
+        return new Request(
+            TimeValue.THIRTY_SECONDS,
+            TimeValue.timeValueMillis(instance.requestTimeout().millis() + randomIntBetween(1, 1000))
+        );
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
index 6ba7dc6ac24cd..9d3c4d684e194 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
 import org.elasticsearch.action.support.ActionTestUtils;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
-import org.elasticsearch.action.support.master.MasterNodeRequest;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -21,6 +20,7 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.indices.SystemIndexDescriptor;
@@ -297,7 +297,7 @@ public void testAddDocMappingIfMissing() {
             {"_doc":{"properties":{"some-field":{"type":"long"}}}}""",
             client,
             clusterState,
-            MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT,
+            TimeValue.THIRTY_SECONDS,
             ActionTestUtils.assertNoFailureListener(Assert::assertTrue),
             1
         );
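[Editor's note] With MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT no longer imported in the test above and the one below, the ML tests pin the previous default explicitly as TimeValue.THIRTY_SECONDS, so their behaviour is unchanged while the implicit constant disappears from call sites.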
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java
index f9fdc0c8362e5..f72ca14c37e14 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAliasTests.java
@@ -18,7 +18,6 @@
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
 import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
-import org.elasticsearch.action.support.master.MasterNodeRequest;
 import org.elasticsearch.client.internal.AdminClient;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.client.internal.ClusterAdminClient;
@@ -371,7 +370,7 @@ private void createIndexAndAliasIfNecessary(ClusterState clusterState) {
             TestIndexNameExpressionResolver.newInstance(),
             TEST_INDEX_PREFIX,
             TEST_INDEX_ALIAS,
-            MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT,
+            TimeValue.THIRTY_SECONDS,
             listener
         );
     }
diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java
index 13ef198863284..3376073bded02 100644
--- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java
+++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java
@@ -331,6 +331,7 @@ public static class Request extends MasterNodeReadRequest<Request> implements IndicesRequest.Replaceable {
         private String[] indices;

         public Request(String... indices) {
+            super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
             this.indices = indices;
         }

diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/DeleteAnalyticsCollectionAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/DeleteAnalyticsCollectionAction.java
index 43601ab1b2943..ac5c5761efe13 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/DeleteAnalyticsCollectionAction.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/DeleteAnalyticsCollectionAction.java
@@ -44,6 +44,7 @@ public Request(StreamInput in) throws IOException {
         }

         public Request(String collectionName) {
+            super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
             this.collectionName = collectionName;
         }

diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java
index f9eeb2cca6d2e..d54c119e083ed 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java
@@ -41,6 +41,7 @@ public static class Request extends MasterNodeReadRequest<Request> implements ToXContentObject {
         public static ParseField NAMES_FIELD = new ParseField("names");

         public Request(String[] names) {
+            super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
             this.names = Objects.requireNonNull(names, "Collection names cannot be null");
         }

diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java
index 659c58d2bd1b8..108cebae155be 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PutAnalyticsCollectionAction.java
@@ -43,6 +43,7 @@ public Request(StreamInput in) throws IOException {
         }

         public Request(String name) {
+            super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
             this.name = name;
         }
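[Editor's note] Every constructor change in the files above follows one pattern: request classes that used to inherit an implicit ~30s master-node timeout now name it explicitly via the deliberately ugly TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT constant, so the remaining call sites that still rely on the default are easy to grep for and fix later. A minimal sketch of the pattern, written against the Elasticsearch server classes; ExampleRequest and its field are hypothetical, only the two timeout constants and the super-constructor shapes come from the diff above:

    import org.elasticsearch.action.support.master.AcknowledgedRequest;

    public class ExampleRequest extends AcknowledgedRequest<ExampleRequest> {
        private final String name;

        public ExampleRequest(String name) {
            // Spell out the previously implicit defaults: a master-node timeout and,
            // for acknowledged requests, an ack timeout.
            super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
            this.name = name;
        }
    }

MasterNodeReadRequest subclasses use the single-argument form, super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT), as in the watcher and analytics requests above.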
diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java
index 072dc5265fe60..7c57212d0f574 100644
--- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java
+++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java
@@ -118,15 +118,16 @@ public void testDoNotLogWithInfo() throws IOException {
             Map<String, String> colA = Map.of("name", "DO_NOT_LOG_ME", "type", "integer");
             assertEquals(List.of(colA), result.get("columns"));
             assertEquals(List.of(List.of(1)), result.get("values"));
-            try (InputStream log = cluster.getNodeLog(0, LogType.SERVER)) {
-                Streams.readAllLines(log, line -> { assertThat(line, not(containsString("DO_NOT_LOG_ME"))); });
+            for (int i = 0; i < cluster.getNumNodes(); i++) {
+                try (InputStream log = cluster.getNodeLog(i, LogType.SERVER)) {
+                    Streams.readAllLines(log, line -> assertThat(line, not(containsString("DO_NOT_LOG_ME"))));
+                }
             }
         } finally {
             setLoggingLevel(null);
         }
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108367")
     public void testDoLogWithDebug() throws IOException {
         try {
             setLoggingLevel("DEBUG");
@@ -136,15 +137,17 @@ public void testDoLogWithDebug() throws IOException {
             Map<String, String> colA = Map.of("name", "DO_LOG_ME", "type", "integer");
             assertEquals(List.of(colA), result.get("columns"));
             assertEquals(List.of(List.of(1)), result.get("values"));
-            try (InputStream log = cluster.getNodeLog(0, LogType.SERVER)) {
-                boolean[] found = new boolean[] { false };
-                Streams.readAllLines(log, line -> {
-                    if (line.contains("DO_LOG_ME")) {
-                        found[0] = true;
-                    }
-                });
-                assertThat(found[0], equalTo(true));
+            boolean[] found = new boolean[] { false };
+            for (int i = 0; i < cluster.getNumNodes(); i++) {
+                try (InputStream log = cluster.getNodeLog(i, LogType.SERVER)) {
+                    Streams.readAllLines(log, line -> {
+                        if (line.contains("DO_LOG_ME")) {
+                            found[0] = true;
+                        }
+                    });
+                }
             }
+            assertThat(found[0], equalTo(true));
         } finally {
             setLoggingLevel(null);
         }
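[Editor's note] The rewritten assertions above scan the server log of every node rather than only node 0, since the query may be serviced by any node in the cluster; the positive test collects a single found flag across all logs before asserting, which un-mutes the previously @AwaitsFix'd testDoLogWithDebug. A condensed sketch of the negative check as a hypothetical helper (imports and the cluster/Streams/LogType helpers are the ones already used in the test above):

    private void assertNotInAnyNodeLog(String needle) throws IOException {
        // Check every node: the request may have been routed to any of them.
        for (int i = 0; i < cluster.getNumNodes(); i++) {
            try (InputStream log = cluster.getNodeLog(i, LogType.SERVER)) {
                Streams.readAllLines(log, line -> assertThat(line, not(containsString(needle))));
            }
        }
    }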
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog.csv-spec
index 64c4641b2ca01..3f6ef72d84bc3 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog.csv-spec
@@ -1,7 +1,7 @@
 # Examples that were published in a blog post

 2023-08-08.full-blown-query
-required_feature: esql.enrich_load
+required_capability: enrich_load

 FROM employees
 | WHERE still_hired == true
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec
index 809f4e9ba2c74..c0572e7bbcd49 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec
@@ -63,7 +63,7 @@ avg(salary):double | always_false:boolean
 in
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | keep emp_no, is_rehired, still_hired | where is_rehired in (still_hired, true) | where is_rehired != still_hired;
 ignoreOrder:true
@@ -236,7 +236,7 @@ emp_no:integer |languages:integer |byte2bool:boolean |short2bool:boolean
 ;
 mvSort
-required_feature: esql.mv_sort
+required_capability: mv_sort
 row a = [true, false, true, false] | eval sa = mv_sort(a), sb = mv_sort(a, "DESC");
@@ -245,7 +245,7 @@ a:boolean | sa:boolean | sb:boolean
 ;
 mvSortEmp
-required_feature: esql.mv_sort
+required_capability: mv_sort
 FROM employees
 | eval sd = mv_sort(is_rehired, "DESC"), sa = mv_sort(is_rehired)
@@ -263,7 +263,7 @@ emp_no:integer | is_rehired:boolean | sa:boolean | sd:boolean
 ;
 mvSlice
-required_feature: esql.mv_sort
+required_capability: mv_sort
 row a = [true, false, false, true] | eval a1 = mv_slice(a, 1), a2 = mv_slice(a, 2, 3);
@@ -273,7 +273,7 @@ a:boolean | a1:boolean | a2:boolean
 ;
 mvSliceEmp
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(is_rehired, 0)
@@ -290,7 +290,7 @@ emp_no:integer | is_rehired:boolean | a1:boolean
 ;
 values
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -302,7 +302,7 @@ required_feature: esql.agg_values
 ;
 valuesGrouped
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -323,7 +323,7 @@ still_hired:boolean | first_letter:keyword
 ;
 valuesGroupedByOrdinals
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -347,7 +347,7 @@ still_hired:boolean | job_positions:keyword
 ;
 implicitCastingEqual
-required_feature: esql.string_literal_auto_casting_extended
+required_capability: string_literal_auto_casting_extended
 from employees | where still_hired == "true" | sort emp_no | keep emp_no | limit 1;
 emp_no:integer
@@ -355,7 +355,7 @@ emp_no:integer
 ;
 implicitCastingNotEqual
-required_feature: esql.string_literal_auto_casting_extended
+required_capability: string_literal_auto_casting_extended
 from employees | where still_hired != "true" | sort emp_no | keep emp_no | limit 1;
 emp_no:integer
@@ -363,7 +363,7 @@ emp_no:integer
 ;
 implicitCastingIn
-required_feature: esql.string_literal_auto_casting_extended
+required_capability: string_literal_auto_casting_extended
 from employees | where still_hired in ("true", "false") | sort emp_no | keep emp_no | limit 1;
 emp_no:integer
@@ -371,7 +371,7 @@ emp_no:integer
 ;
 implicitCastingInField
-required_feature: esql.string_literal_auto_casting_extended
+required_capability: string_literal_auto_casting_extended
 from employees | where false in ("true", still_hired) | sort emp_no | keep emp_no | limit 1;
 emp_no:integer
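[Editor's note] The rename shown in the two spec files above repeats mechanically through every remaining csv-spec hunk in this section: the per-test gating directive "required_feature: esql.<name>" becomes "required_capability: <name>" (the "esql." prefix is dropped and, in a few files that wrote "required_feature:esql.x" without a space, the space after the colon is normalized); the queries, warnings, and expected results are untouched.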
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv-spec
index aa6529c2d4319..508cccc20b86c 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/cartesian_multipolygons.csv-spec
@@ -6,7 +6,7 @@
 # Test against a polygon similar in size to the Bottom Left polygon
 whereIntersectsSinglePolygon
-required_feature: esql.st_intersects
+required_capability: st_intersects
 FROM cartesian_multipolygons
 | WHERE ST_Intersects(shape, TO_CARTESIANSHAPE("POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))"))
@@ -25,7 +25,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereContainsSinglePolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Contains(shape, TO_CARTESIANSHAPE("POLYGON((0.001 0.001, 0.999 0.001, 0.999 0.999, 0.001 0.999, 0.001 0.001))"))
@@ -38,7 +38,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereWithinSinglePolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Within(shape, TO_CARTESIANSHAPE("POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))"))
@@ -53,7 +53,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereDisjointSinglePolygon
-required_feature: esql.st_disjoint
+required_capability: st_disjoint
 FROM cartesian_multipolygons
 | WHERE ST_Disjoint(shape, TO_CARTESIANSHAPE("POLYGON((0 0, 1 0, 1 1, 0 1, 0 0))"))
@@ -79,7 +79,7 @@ id:l | name:keyword | shape:cartesian_shape
 # Test against a polygon smaller in size to the Bottom Left polygon
 whereIntersectsSmallerPolygon
-required_feature: esql.st_intersects
+required_capability: st_intersects
 FROM cartesian_multipolygons
 | WHERE ST_Intersects(shape, TO_CARTESIANSHAPE("POLYGON((0.2 0.2, 0.8 0.2, 0.8 0.8, 0.2 0.8, 0.2 0.2))"))
@@ -98,7 +98,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereContainsSmallerPolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Contains(shape, TO_CARTESIANSHAPE("POLYGON((0.2 0.2, 0.8 0.2, 0.8 0.8, 0.2 0.8, 0.2 0.2))"))
@@ -111,7 +111,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereWithinSmallerPolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Within(shape, TO_CARTESIANSHAPE("POLYGON((0.2 0.2, 0.8 0.2, 0.8 0.8, 0.2 0.8, 0.2 0.2))"))
@@ -123,7 +123,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereDisjointSmallerPolygon
-required_feature: esql.st_disjoint
+required_capability: st_disjoint
 FROM cartesian_multipolygons
 | WHERE ST_Disjoint(shape, TO_CARTESIANSHAPE("POLYGON((0.2 0.2, 0.8 0.2, 0.8 0.8, 0.2 0.8, 0.2 0.2))"))
@@ -149,7 +149,7 @@ id:l | name:keyword | shape:cartesian_shape
 # Test against a polygon similar in size to the entire test data
 whereIntersectsLargerPolygon
-required_feature: esql.st_intersects
+required_capability: st_intersects
 FROM cartesian_multipolygons
 | WHERE ST_Intersects(shape, TO_CARTESIANSHAPE("POLYGON((0 0, 3 0, 3 3, 0 3, 0 0))"))
@@ -180,7 +180,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereContainsLargerPolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Contains(shape, TO_CARTESIANSHAPE("POLYGON((0 0, 3 0, 3 3, 0 3, 0 0))"))
@@ -191,7 +191,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereWithinLargerPolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Within(shape, TO_CARTESIANSHAPE("POLYGON((0 0, 3 0, 3 3, 0 3, 0 0))"))
@@ -222,7 +222,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereDisjointLargerPolygon
-required_feature: esql.st_disjoint
+required_capability: st_disjoint
 FROM cartesian_multipolygons
 | WHERE ST_Disjoint(shape, TO_CARTESIANSHAPE("POLYGON((0 0, 3 0, 3 3, 0 3, 0 0))"))
@@ -236,7 +236,7 @@ id:l | name:keyword | shape:cartesian_shape
 # Test against a polygon larger than all test data
 whereIntersectsEvenLargerPolygon
-required_feature: esql.st_intersects
+required_capability: st_intersects
 FROM cartesian_multipolygons
 | WHERE ST_Intersects(shape, TO_CARTESIANSHAPE("POLYGON((-1 -1, 4 -1, 4 4, -1 4, -1 -1))"))
@@ -267,7 +267,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereContainsEvenLargerPolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Contains(shape, TO_CARTESIANSHAPE("POLYGON((-1 -1, 4 -1, 4 4, -1 4, -1 -1))"))
@@ -278,7 +278,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereWithinEvenLargerPolygon
-required_feature: esql.st_contains_within
+required_capability: st_contains_within
 FROM cartesian_multipolygons
 | WHERE ST_Within(shape, TO_CARTESIANSHAPE("POLYGON((-1 -1, 4 -1, 4 4, -1 4, -1 -1))"))
@@ -309,7 +309,7 @@ id:l | name:keyword | shape:cartesian_shape
 ;
 whereDisjointEvenLargerPolygon
-required_feature: esql.st_disjoint
+required_capability: st_disjoint
 FROM cartesian_multipolygons
 | WHERE ST_Disjoint(shape, TO_CARTESIANSHAPE("POLYGON((-1 -1, 4 -1, 4 4, -1 4, -1 -1))"))
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec
index 64a8c1d9da316..d4b45ca37fc2d 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec
@@ -130,7 +130,7 @@ error_rate:double | hour:date
 nullOnMultivaluesMathOperation
-required_feature: esql.disable_nullable_opts
+required_capability: disable_nullable_opts
 ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 |  WHERE sum IS NULL;
 warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded.
@@ -142,7 +142,7 @@ a:integer | b:integer | sum:integer
 notNullOnMultivaluesMathOperation
-required_feature: esql.disable_nullable_opts
+required_capability: disable_nullable_opts
 ROW a = 5, b = [ 1, 2 ]| EVAL sum = a + b| LIMIT 1 |  WHERE sum IS NOT NULL;
 warning:Line 1:37: evaluation of [a + b] failed, treating result as null. Only first 20 failures recorded.
@@ -153,7 +153,7 @@ a:integer | b:integer | sum:integer
 nullOnMultivaluesComparisonOperation
-required_feature: esql.disable_nullable_opts
+required_capability: disable_nullable_opts
 ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 |  WHERE same IS NULL;
 warning:Line 1:38: evaluation of [a == b] failed, treating result as null. Only first 20 failures recorded.
@@ -166,7 +166,7 @@ a:integer | b:integer | same:boolean
 notNullOnMultivaluesComparisonOperation
-required_feature: esql.disable_nullable_opts
+required_capability: disable_nullable_opts
 ROW a = 5, b = [ 1, 2 ]| EVAL same = a == b| LIMIT 1 |  WHERE same IS NOT NULL;
 warning:Line 1:38: evaluation of [a == b] failed, treating result as null. Only first 20 failures recorded.
@@ -177,7 +177,7 @@ a:integer | b:integer | same:boolean
 notNullOnMultivaluesComparisonOperationWithPartialMatch
-required_feature: esql.disable_nullable_opts
+required_capability: disable_nullable_opts
 ROW a = 5, b = [ 5, 2 ]| EVAL same = a == b| LIMIT 1 |  WHERE same IS NOT NULL;
 warning:Line 1:38: evaluation of [a == b] failed, treating result as null. Only first 20 failures recorded.
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec
index 43e683e165e29..94dfd9f3267f7 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec
@@ -1,7 +1,7 @@
 // Conversion-specific tests

 convertToBoolean
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW zero=0::boolean, one=1::bool
 ;
@@ -10,7 +10,7 @@ false |true
 ;
 convertToInteger
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW zero="0"::integer, one="1"::int
 ;
@@ -19,7 +19,7 @@ ROW zero="0"::integer, one="1"::int
 ;
 convertToIP
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW ip="1.1.1.1"::ip
 ;
@@ -28,7 +28,7 @@ ROW ip="1.1.1.1"::ip
 ;
 convertToLong
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW long="-1"::long
 ;
@@ -37,7 +37,7 @@ long:long
 ;
 convertToLongWithWarning
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW long="1.1.1.1"::long
 ;
 warning:Line 1:10: evaluation of [\"1.1.1.1\"::long] failed, treating result as null. Only first 20 failures recorded.
@@ -48,7 +48,7 @@ null
 ;
 convertToDouble
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW zero="0"::double
 ;
@@ -57,7 +57,7 @@ ROW zero="0"::double
 ;
 convertToString
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW one=1::keyword, two=2::text, three=3::string
 ;
@@ -66,7 +66,7 @@ ROW one=1::keyword, two=2::text, three=3::string
 ;
 convertToDatetime
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW date="1985-01-01T00:00:00Z"::datetime, zero=0::datetime
 ;
@@ -75,7 +75,7 @@ ROW date="1985-01-01T00:00:00Z"::datetime, zero=0::datetime
 ;
 convertToVersion
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW ver="1.2.3"::version
 ;
@@ -84,7 +84,7 @@ ROW ver="1.2.3"::version
 ;
 convertToUnsignedLong
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW zero="0"::unsigned_long, two=abs(-2)::UnsigneD_LOng
 ;
@@ -93,7 +93,7 @@ ROW zero="0"::unsigned_long, two=abs(-2)::UnsigneD_LOng
 ;
 convertToGeoPoint
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW gp="POINT(0 0)"::geo_point
 ;
@@ -102,7 +102,7 @@ POINT (0.0 0.0)
 ;
 convertToGeoShape
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW gs="POINT(0 0)"::geo_shape
 ;
@@ -111,7 +111,7 @@ POINT (0.0 0.0)
 ;
 convertToCartesianPoint
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW cp="POINT(0 0)"::cartesian_point
 ;
@@ -120,7 +120,7 @@ POINT (0.0 0.0)
 ;
 convertToCartesianShape
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW cs="POINT(0 0)"::cartesian_shape
 ;
@@ -129,7 +129,7 @@ POINT (0.0 0.0)
 ;
 convertChained
-required_feature: esql.casting_operator
+required_capability: casting_operator
 ROW one=1::STRING::LONG::BOOL
 ;
@@ -138,7 +138,7 @@ true
 ;
 convertWithIndexMultipleConversionsInSameExpressionAndConversionInFiltering
-required_feature: esql.casting_operator
+required_capability: casting_operator
 FROM employees
 | EVAL en_str=emp_no::STRING, bd=ABS(birth_date::LONG)::STRING
 | KEEP en_str, emp_no, bd, birth_date
@@ -153,7 +153,7 @@ required_feature: esql.casting_operator
 ;
 convertWithBoolExpressionAndQualifiedName
-required_feature: esql.casting_operator
+required_capability: casting_operator
 FROM employees
 | EVAL neg = (NOT still_hired)::string, sf = ROUND(height.scaled_float::double, 2)
 | KEEP emp_no, still_hired, neg, sf
@@ -169,7 +169,7 @@ required_feature: esql.casting_operator
 ;
 docsCastOperator
-required_feature: esql.casting_operator
+required_capability: casting_operator
 //tag::docsCastOperator[]
 ROW ver = CONCAT(("0"::INT + 1)::STRING, ".2.3")::VERSION
 //end::docsCastOperator[]
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
index 8d54288de552d..22e9231939d02 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
@@ -216,7 +216,7 @@ string:keyword |datetime:date
 ;
 convertFromUnsignedLong
-required_feature:esql.convert_warn
+required_capability: convert_warn
 row ul = [9223372036854775808, 520128000000] | eval dt = to_datetime(ul);
 warning:Line 1:58: evaluation of [to_datetime(ul)] failed, treating result as null. Only first 20 failures recorded.
@@ -357,7 +357,7 @@ date1:date | date2:date | dd_ms:integer
 ;
 evalDateDiffString
-required_feature: esql.string_literal_auto_casting
+required_capability: string_literal_auto_casting
 ROW date1 = TO_DATETIME("2023-12-02T11:00:00.000Z")
 | EVAL dd_ms = DATE_DIFF("microseconds", date1, "2023-12-02T11:00:00.001Z")
@@ -623,7 +623,7 @@ dt:datetime |plus_post:datetime |plus_pre:datetime
 datePlusQuarter
 # "quarter" introduced in 8.15
-required_feature: esql.timespan_abbreviations
+required_capability: timespan_abbreviations
 row dt = to_dt("2100-01-01T01:01:01.000Z")
 | eval plusQuarter = dt + 2 quarters
 ;
@@ -634,7 +634,7 @@ dt:datetime | plusQuarter:datetime
 datePlusAbbreviatedDurations
 # abbreviations introduced in 8.15
-required_feature: esql.timespan_abbreviations
+required_capability: timespan_abbreviations
 row dt = to_dt("2100-01-01T00:00:00.000Z")
 | eval plusDurations = dt + 1 h + 2 min + 2 sec + 1 s + 4 ms
 ;
@@ -645,7 +645,7 @@ dt:datetime | plusDurations:datetime
 datePlusAbbreviatedPeriods
 # abbreviations introduced in 8.15
-required_feature: esql.timespan_abbreviations
+required_capability: timespan_abbreviations
 row dt = to_dt("2100-01-01T00:00:00.000Z")
 | eval plusDurations = dt + 0 yr + 1y + 2 q + 3 mo + 4 w + 3 d
 ;
@@ -855,7 +855,7 @@ date:date | year:long
 ;
 dateExtractString
-required_feature: esql.string_literal_auto_casting
+required_capability: string_literal_auto_casting
 ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06")
 | EVAL year = DATE_EXTRACT("year", "2022-05-06")
@@ -896,7 +896,7 @@ Anneke |Preusig |1989-06-02T00:00:00.000Z|1989-06-02
 ;
 evalDateFormatString
-required_feature: esql.string_literal_auto_casting
+required_capability: string_literal_auto_casting
 ROW a = 1
 | EVAL df = DATE_FORMAT("YYYY-MM-dd", "1989-06-02T00:00:00.000Z")
@@ -925,7 +925,7 @@ Anneke |Preusig |1989-06-02T00:00:00.000Z|1989-01-01T00:00:00.000
 ;
 evalDateTruncString
-required_feature: esql.string_literal_auto_casting
+required_capability: string_literal_auto_casting
 ROW a = 1
 | EVAL year_hired = DATE_TRUNC(1 year, "1991-06-26T00:00:00.000Z")
@@ -990,7 +990,7 @@ FROM sample_data
 ;
 mvSort
-required_feature: esql.mv_sort
+required_capability: mv_sort
 row a = ["1985-01-01T00:00:00.000Z", "1986-01-01T00:00:00.000Z", "1987-01-01T00:00:00.000Z"]
 | eval datetime = TO_DATETIME(a)
@@ -1019,7 +1019,7 @@ count:long | age:long
 ;
 values
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10003
@@ -1031,7 +1031,7 @@ required_feature: esql.agg_values
 ;
 valuesGrouped
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -1052,7 +1052,7 @@ required_feature: esql.agg_values
 ;
 valuesGroupedByOrdinals
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -1077,7 +1077,7 @@ required_feature: esql.agg_values
 ;
 implicitCastingNotEqual
-required_feature: esql.string_literal_auto_casting
+required_capability: string_literal_auto_casting
 from employees | where birth_date != "1957-05-23T00:00:00Z" | keep emp_no, birth_date | sort emp_no | limit 3;
 emp_no:integer | birth_date:datetime
@@ -1087,7 +1087,7 @@ emp_no:integer | birth_date:datetime
 ;
 implicitCastingLessThanOrEqual
-required_feature: esql.string_literal_auto_casting
+required_capability: string_literal_auto_casting
 from employees | where birth_date <= "1957-05-20T00:00:00Z" | keep emp_no, birth_date | sort emp_no | limit 3;
 emp_no:integer | birth_date:datetime
@@ -1097,7 +1097,7 @@ emp_no:integer | birth_date:datetime
 ;
 implicitCastingGreaterThan
-required_feature: esql.string_literal_auto_casting
+required_capability: string_literal_auto_casting
 from employees | where birth_date > "1957-05-24T00:00:00Z" | keep emp_no, birth_date | sort emp_no | limit 3;
 emp_no:integer | birth_date:datetime
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec
index f044989ec9cce..bd384886f0dd7 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/enrich.csv-spec
@@ -32,7 +32,7 @@ median_duration:double | env:keyword
 ;
 simple
-required_feature: esql.enrich_load
+required_capability: enrich_load
 // tag::enrich[]
 ROW language_code = "1"
@@ -47,7 +47,7 @@ language_code:keyword | language_name:keyword
 ;
 enrichOnSimple
-required_feature: esql.enrich_load
+required_capability: enrich_load
 // tag::enrich_on[]
 ROW a = "1"
@@ -63,7 +63,7 @@ a:keyword | language_name:keyword
 enrichOn
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | sort emp_no | limit 1 | eval x = to_string(languages) | enrich languages_policy on x | keep emp_no, language_name;
@@ -73,7 +73,7 @@ emp_no:integer | language_name:keyword
 enrichOn2
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | eval x = to_string(languages) | enrich languages_policy on x | keep emp_no, language_name | sort emp_no | limit 1 ;
@@ -83,7 +83,7 @@ emp_no:integer | language_name:keyword
 simpleSortLimit
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | eval x = to_string(languages) | enrich languages_policy on x | keep emp_no, language_name | sort emp_no | limit 1;
@@ -92,7 +92,7 @@ emp_no:integer | language_name:keyword
 ;
 with
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | eval x = to_string(languages) | keep emp_no, x | sort emp_no | limit 1 | enrich languages_policy on x with language_name;
@@ -103,7 +103,7 @@ emp_no:integer | x:keyword | language_name:keyword
 withSimple
-required_feature: esql.enrich_load
+required_capability: enrich_load
 // tag::enrich_with[]
 ROW a = "1"
@@ -119,7 +119,7 @@ a:keyword | language_name:keyword
 withAlias
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | sort emp_no | limit 3 | eval x = to_string(languages) | keep emp_no, x | enrich languages_policy on x with lang = language_name;
@@ -131,7 +131,7 @@ emp_no:integer | x:keyword | lang:keyword
 ;
 withAliasSimple
-required_feature: esql.enrich_load
+required_capability: enrich_load
 // tag::enrich_rename[]
 ROW a = "1"
@@ -147,7 +147,7 @@ a:keyword | name:keyword
 withAliasSort
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | eval x = to_string(languages) | keep emp_no, x | sort emp_no | limit 3 | enrich languages_policy on x with lang = language_name;
@@ -160,7 +160,7 @@ emp_no:integer | x:keyword | lang:keyword
 withAliasOverwriteName#[skip:-8.13.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | sort emp_no
 | eval x = to_string(languages) | enrich languages_policy on x with emp_no = language_name
@@ -172,7 +172,7 @@ French
 ;
 withAliasAndPlain
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | sort emp_no desc | limit 3 | eval x = to_string(languages) | keep emp_no, x | enrich languages_policy on x with lang = language_name, language_name;
@@ -185,7 +185,7 @@ emp_no:integer | x:keyword | lang:keyword | language_name:keyword
 withTwoAliasesSameProp
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | sort emp_no | limit 1 | eval x = to_string(languages) | keep emp_no, x | enrich languages_policy on x with lang = language_name, lang2 = language_name;
@@ -196,7 +196,7 @@ emp_no:integer | x:keyword | lang:keyword | lang2:keyword
 redundantWith
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | sort emp_no | limit 1 | eval x = to_string(languages) | keep emp_no, x | enrich languages_policy on x with language_name, language_name;
@@ -207,7 +207,7 @@ emp_no:integer | x:keyword | language_name:keyword
 nullInput
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | where emp_no == 10017 | keep emp_no, gender | enrich languages_policy on gender with language_name, language_name;
@@ -218,7 +218,7 @@ emp_no:integer | gender:keyword | language_name:keyword
 constantNullInput
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees | where emp_no == 10020 | eval x = to_string(languages) | keep emp_no, x | enrich languages_policy on x with language_name, language_name;
@@ -229,7 +229,7 @@ emp_no:integer | x:keyword | language_name:keyword
 multipleEnrich
-required_feature: esql.enrich_load
+required_capability: enrich_load
 row a = "1", b = "2", c = "10"
 | enrich languages_policy on a with a_lang = language_name
@@ -242,7 +242,7 @@ a:keyword | b:keyword | c:keyword | a_lang:keyword | b_lang:keyword | c_lang:keyword
 enrichEval
-required_feature: esql.enrich_load
+required_capability: enrich_load
 from employees
 | eval x = to_string(languages)
 | enrich languages_policy on x with lang = language_name
@@ -258,8 +258,8 @@ emp_no:integer | x:keyword | lang:keyword | language:keyword
 multivalue
-required_feature: esql.enrich_load
-required_feature: esql.mv_sort
+required_capability: enrich_load
+required_capability: mv_sort
 row a = ["1", "2"] | enrich languages_policy on a with a_lang = language_name | eval a_lang = mv_sort(a_lang);
@@ -269,7 +269,7 @@ a:keyword | a_lang:keyword
 enrichCidr#[skip:-8.13.99, reason:enrich for cidr added in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM sample_data
 | ENRICH client_cidr_policy ON client_ip WITH env
@@ -290,7 +290,7 @@ client_ip:ip | count_env:i | max_env:keyword
 enrichCidr2#[skip:-8.99.99, reason:ip_range support not added yet]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM sample_data
 | ENRICH client_cidr_policy ON client_ip WITH env, client_cidr
@@ -310,7 +310,7 @@ client_ip:ip | env:keyword | client_cidr:ip_range
 enrichAgesStatsYear#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM employees
 | WHERE birth_date > "1960-01-01"
@@ -333,7 +333,7 @@ birth_year:long | age_group:keyword | count:long
 enrichAgesStatsAgeGroup#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM employees
 | WHERE birth_date IS NOT NULL
@@ -350,7 +350,7 @@ count:long | age_group:keyword
 enrichHeightsStats#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM employees
 | ENRICH heights_policy ON height WITH height_group = description
@@ -369,7 +369,7 @@ Very Tall | 2.0 | 2.1 | 20
 enrichDecadesStats#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM employees
 | ENRICH decades_policy ON birth_date WITH birth_decade = decade, birth_description = description
@@ -390,7 +390,7 @@ null | 1980 | null | Radical Eighties | 4
 spatialEnrichmentKeywordMatch#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM airports
 | WHERE abbrev == "CPH"
@@ -405,7 +405,7 @@ CPH | Copenhagen | POINT(12.5683 55.6761) | Denmark
 spatialEnrichmentGeoMatch#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM airports
 | WHERE abbrev == "CPH"
@@ -420,8 +420,8 @@ CPH | Copenhagen | POINT(12.5683 55.6761) | Denmark
 spatialEnrichmentGeoMatchStats#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
-required_feature: esql.mv_warn
+required_capability: enrich_load
+required_capability: mv_warn
 FROM airports
 | ENRICH city_boundaries ON city_location WITH airport, region, city_boundary
@@ -437,7 +437,7 @@ POINT(1.396561 24.127649) | 872 | 88 | 1044
 spatialEnrichmentKeywordMatchAndSpatialPredicate#[skip:-8.13.99, reason:st_intersects added in 8.14]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM airports
 | ENRICH city_names ON city WITH airport, region, city_boundary
@@ -455,7 +455,7 @@ count:long | airport_in_city:boolean
 spatialEnrichmentKeywordMatchAndSpatialAggregation#[skip:-8.13.99, reason:st_intersects added in 8.14]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM airports
 | ENRICH city_names ON city WITH airport, region, city_boundary
@@ -473,7 +473,7 @@ count:long | centroid:geo_point | airport_in_city:boolean
 spatialEnrichmentTextMatch#[skip:-8.13.99, reason:ENRICH extended in 8.14.0]
-required_feature: esql.enrich_load
+required_capability: enrich_load
 FROM airports
 | WHERE abbrev == "IDR"
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec
index 85b665d717449..571d7835451c3 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec
@@ -201,7 +201,7 @@ Kyoichi. |Kyoichi.Maliniak |Kyoichi.MaliniakKyoichi. |Kyoichi
 ;
 roundArrays
-required_feature: esql.disable_nullable_opts
+required_capability: disable_nullable_opts
 row a = [1.2], b = [2.4, 7.9] | eval c = round(a), d = round(b), e = round([1.2]), f = round([1.2, 4.6]), g = round([1.14], 1), h = round([1.14], [1, 2]);
 warning:Line 1:56: evaluation of [round(b)] failed, treating result as null. Only first 20 failures recorded.
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec
index 8af770c521243..1f2bcb6b51209 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec
@@ -92,7 +92,7 @@ int:integer |dbl:double
 ;
 lessThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change < 1 | keep emp_no, salary_change | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [salary_change < 1] failed, treating result as null. Only first 20 failures recorded.
@@ -108,7 +108,7 @@ emp_no:integer |salary_change:double
 ;
 greaterThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change > 1 | keep emp_no, salary_change | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [salary_change > 1] failed, treating result as null. Only first 20 failures recorded.
@@ -124,7 +124,7 @@ emp_no:integer |salary_change:double
 ;
 equalToMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change == 1.19 | keep emp_no, salary_change | sort emp_no;
 warning:Line 1:24: evaluation of [salary_change == 1.19] failed, treating result as null. Only first 20 failures recorded.
@@ -136,7 +136,7 @@ emp_no:integer |salary_change:double
 ;
 equalToOrEqualToMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change == 1.19 or salary_change == 7.58 | keep emp_no, salary_change | sort emp_no;
 warning:Line 1:24: evaluation of [salary_change] failed, treating result as null. Only first 20 failures recorded.
@@ -149,7 +149,7 @@ emp_no:integer |salary_change:double
 ;
 inMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change in (1.19, 7.58) | keep emp_no, salary_change | sort emp_no;
 warning:Line 1:24: evaluation of [salary_change in (1.19, 7.58)] failed, treating result as null. Only first 20 failures recorded.
@@ -162,7 +162,7 @@ emp_no:integer |salary_change:double
 ;
 notLessThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where not(salary_change < 1) | keep emp_no, salary_change | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [not(salary_change < 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change < 1] failed, treating result as null. Only first 20 failures recorded.]
@@ -178,7 +178,7 @@ emp_no:integer |salary_change:double
 ;
 notGreaterThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where not(salary_change > 1) | keep emp_no, salary_change | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [not(salary_change > 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change > 1] failed, treating result as null. Only first 20 failures recorded.]
@@ -194,7 +194,7 @@ emp_no:integer |salary_change:double
 ;
 notEqualToMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where not(salary_change == 1.19) | keep emp_no, salary_change | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [not(salary_change == 1.19)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change == 1.19] failed, treating result as null. Only first 20 failures recorded.]
@@ -241,7 +241,7 @@ row a = [1.1, 2.1, 2.1] | eval da = mv_dedupe(a);
 ;
 mvSliceEmp
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change, 0, 1)
@@ -436,7 +436,7 @@ ROW deg = [90.0, 180.0, 270.0]
 ;
 mvSort
-required_feature: esql.mv_sort
+required_capability: mv_sort
 row a = [4.0, 2.0, -3.0, 2.0] | eval sa = mv_sort(a), sd = mv_sort(a, "DESC");
@@ -445,7 +445,7 @@ a:double | sa:double | sd:double
 ;
 mvSortEmp
-required_feature: esql.mv_sort
+required_capability: mv_sort
 FROM employees
 | eval sd = mv_sort(salary_change, "DESC"), sa = mv_sort(salary_change)
@@ -467,7 +467,7 @@ emp_no:integer | salary_change:double | sa:double | sd:double
 ;
 values
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -479,7 +479,7 @@ required_feature: esql.agg_values
 ;
 valuesGrouped
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -500,7 +500,7 @@ required_feature: esql.agg_values
 ;
 valuesGroupedByOrdinals
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/from.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/from.csv-spec
index c2c0b82f1a664..00a8c0da8f14c 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/from.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/from.csv-spec
@@ -130,7 +130,7 @@ c:l | name:k
 ;
 convertFromDatetimeWithOptions
-required_feature: esql.from_options
+required_capability: from_options
 // tag::convertFromDatetimeWithOptions[]
 FROM employees OPTIONS "allow_no_indices"="false","preference"="_local"
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec
index 69ae951e4290d..e247d6c3a04ef 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec
@@ -1,7 +1,7 @@
 // Integral types-specific tests

 inLongAndInt
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where avg_worked_seconds in (372957040, salary_change.long, 236703986) | where emp_no in (10017, emp_no - 1) | keep emp_no, avg_worked_seconds;
 warning:Line 1:24: evaluation of [avg_worked_seconds in (372957040, salary_change.long, 236703986)] failed, treating result as null. Only first 20 failures recorded.
@@ -68,7 +68,7 @@ long:long |ul:ul
 ;
 convertDoubleToUL
-required_feature:esql.convert_warn
+required_capability: convert_warn
 row d = 123.4 | eval ul = to_ul(d), overflow = to_ul(1e20);
 warningRegex:Line 1:48: evaluation of \[to_ul\(1e20\)\] failed, treating result as null. Only first 20 failures recorded.
@@ -127,7 +127,7 @@ int:integer |long:long
 ;
 convertULToLong
-required_feature:esql.convert_warn
+required_capability: convert_warn
 row ul = [9223372036854775807, 9223372036854775808] | eval long = to_long(ul);
 warningRegex:Line 1:67: evaluation of \[to_long\(ul\)\] failed, treating result as null. Only first 20 failures recorded.
@@ -170,7 +170,7 @@ str1:keyword |str2:keyword |str3:keyword |long1:long |long2:long |long3:long
 ;
 convertDoubleToLong
-required_feature:esql.convert_warn
+required_capability: convert_warn
 row d = 123.4 | eval d2l = to_long(d), overflow = to_long(1e19);
 warningRegex:Line 1:51: evaluation of \[to_long\(1e19\)\] failed, treating result as null. Only first 20 failures recorded.
@@ -190,7 +190,7 @@ int:integer |ii:integer
 ;
 convertLongToInt
-required_feature:esql.convert_warn
+required_capability: convert_warn
 // tag::to_int-long[]
 ROW long = [5013792, 2147483647, 501379200000]
@@ -207,7 +207,7 @@ long:long |int:integer
 ;
 convertULToInt
-required_feature:esql.convert_warn
+required_capability: convert_warn
 row ul = [2147483647, 9223372036854775808] | eval int = to_int(ul);
 warningRegex:Line 1:57: evaluation of \[to_int\(ul\)\] failed, treating result as null. Only first 20 failures recorded.
@@ -239,7 +239,7 @@ int_str:keyword |int_dbl_str:keyword |is2i:integer|ids2i:integer
 ;
 convertStringToIntFail#[skip:-8.13.99, reason:warning changed in 8.14]
-required_feature: esql.mv_warn
+required_capability: mv_warn
 row str1 = "2147483647.2", str2 = "2147483648", non = "no number" | eval i1 = to_integer(str1), i2 = to_integer(str2), noi = to_integer(non);
 warning:Line 1:79: evaluation of [to_integer(str1)] failed, treating result as null. Only first 20 failures recorded.
@@ -254,7 +254,7 @@ str1:keyword |str2:keyword |non:keyword |i1:integer |i2:integer |
 ;
 convertDoubleToInt
-required_feature:esql.convert_warn
+required_capability: convert_warn
 row d = 123.4 | eval d2i = to_integer(d), overflow = to_integer(1e19);
 warningRegex:Line 1:54: evaluation of \[to_integer\(1e19\)\] failed, treating result as null. Only first 20 failures recorded.
@@ -265,7 +265,7 @@ d:double |d2i:integer |overflow:integer
 ;
 lessThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change.int < 1 | keep emp_no, salary_change.int | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [salary_change.int < 1] failed, treating result as null. Only first 20 failures recorded.
@@ -281,7 +281,7 @@ emp_no:integer |salary_change.int:integer
 ;
 greaterThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change.int > 1 | keep emp_no, salary_change.int | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [salary_change.int > 1] failed, treating result as null. Only first 20 failures recorded.
@@ -297,7 +297,7 @@ emp_no:integer |salary_change.int:integer
 ;
 equalToMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change.int == 0 | keep emp_no, salary_change.int | sort emp_no;
 warning:Line 1:24: evaluation of [salary_change.int == 0] failed, treating result as null. Only first 20 failures recorded.
@@ -312,7 +312,7 @@ emp_no:integer |salary_change.int:integer
 ;
 equalToOrEqualToMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change.int == 1 or salary_change.int == 8 | keep emp_no, salary_change.int | sort emp_no;
 warning:Line 1:24: evaluation of [salary_change.int] failed, treating result as null. Only first 20 failures recorded.
@@ -325,7 +325,7 @@ emp_no:integer |salary_change.int:integer
 ;
 inMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where salary_change.int in (1, 7) | keep emp_no, salary_change.int | sort emp_no;
 warning:Line 1:24: evaluation of [salary_change.int in (1, 7)] failed, treating result as null. Only first 20 failures recorded.
@@ -338,7 +338,7 @@ emp_no:integer |salary_change.int:integer
 ;
 notLessThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where not(salary_change.int < 1) | keep emp_no, salary_change.int | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [not(salary_change.int < 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int < 1] failed, treating result as null. Only first 20 failures recorded.]
@@ -354,7 +354,7 @@ emp_no:integer |salary_change.int:integer
 ;
 notGreaterThanMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where not(salary_change.int > 1) | keep emp_no, salary_change.int | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [not(salary_change.int > 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int > 1] failed, treating result as null. Only first 20 failures recorded.]
@@ -370,7 +370,7 @@ emp_no:integer |salary_change.int:integer
 ;
 notEqualToMultivalue
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from employees | where not(salary_change.int == 1) | keep emp_no, salary_change.int | sort emp_no | limit 5;
 warning:Line 1:24: evaluation of [not(salary_change.int == 1)] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [salary_change.int == 1] failed, treating result as null. Only first 20 failures recorded.]
@@ -417,7 +417,7 @@ row a = [1, 2, 2, 3] | eval da = mv_dedupe(a);
 ;
 mvSort
-required_feature: esql.mv_sort
+required_capability: mv_sort
 // tag::mv_sort[]
 ROW a = [4, 2, -3, 2]
@@ -432,7 +432,7 @@ a:integer | sa:integer | sd:integer
 ;
 mvSortEmpInt
-required_feature: esql.mv_sort
+required_capability: mv_sort
 FROM employees
 | eval sd = mv_sort(salary_change.int, "DESC"), sa = mv_sort(salary_change.int)
@@ -454,7 +454,7 @@ emp_no:integer | salary_change.int:integer | sa:integer | sd:integer
 ;
 mvSortEmpLong
-required_feature: esql.mv_sort
+required_capability: mv_sort
 FROM employees
 | eval sd = mv_sort(salary_change.long, "DESC"), sa = mv_sort(salary_change.long)
@@ -476,7 +476,7 @@ emp_no:integer | salary_change.long:long | sa:long | sd:long
 ;
 mvSlice
-required_feature: esql.mv_sort
+required_capability: mv_sort
 // tag::mv_slice_positive[]
 row a = [1, 2, 2, 3]
@@ -490,7 +490,7 @@ a:integer | a1:integer | a2:integer
 ;
 mvSliceNegativeOffset
-required_feature: esql.mv_sort
+required_capability: mv_sort
 // tag::mv_slice_negative[]
 row a = [1, 2, 2, 3]
@@ -504,7 +504,7 @@ a:integer | a1:integer | a2:integer
 ;
 mvSliceSingle
-required_feature: esql.mv_sort
+required_capability: mv_sort
 row a = 1
 | eval a1 = mv_slice(a, 0);
@@ -514,7 +514,7 @@ a:integer | a1:integer
 ;
 mvSliceOutOfBound
-required_feature: esql.mv_sort
+required_capability: mv_sort
 row a = [1, 2, 2, 3]
 | eval a1 = mv_slice(a, 4), a2 = mv_slice(a, 2, 6), a3 = mv_slice(a, 4, 6);
@@ -524,7 +524,7 @@ a:integer | a1:integer | a2:integer | a3:integer
 ;
 mvSliceEmpInt
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change.int, 0, 1)
@@ -541,7 +541,7 @@ emp_no:integer | salary_change.int:integer | a1:integer
 ;
 mvSliceEmpIntSingle
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change.int, 1)
@@ -558,7 +558,7 @@ emp_no:integer | salary_change.int:integer | a1:integer
 ;
 mvSliceEmpIntEndOutOfBound
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change.int, 1, 4)
@@ -575,7 +575,7 @@ emp_no:integer | salary_change.int:integer | a1:integer
 ;
 mvSliceEmpIntOutOfBound
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change.int, 2, 4)
@@ -592,7 +592,7 @@ emp_no:integer | salary_change.int:integer | a1:integer
 ;
 mvSliceEmpIntStartOutOfBoundNegative
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change.int, -5, -2)
@@ -609,7 +609,7 @@ emp_no:integer | salary_change.int:integer | a1:integer
 ;
 mvSliceEmpIntOutOfBoundNegative
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change.int, -5, -3)
@@ -626,7 +626,7 @@ emp_no:integer | salary_change.int:integer | a1:integer
 ;
 mvSliceEmpLong
-required_feature: esql.mv_sort
+required_capability: mv_sort
 from employees
 | eval a1 = mv_slice(salary_change.long, 0, 1)
@@ -750,7 +750,7 @@ x:long
 ;
 valuesLong
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -762,7 +762,7 @@ required_feature: esql.agg_values
 ;
 valuesLongGrouped
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -783,7 +783,7 @@ required_feature: esql.agg_values
 ;
 valuesLongGroupedByOrdinals
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -807,7 +807,7 @@ required_feature: esql.agg_values
 ;
 valuesInt
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -819,7 +819,7 @@ required_feature: esql.agg_values
 ;
 valuesIntGrouped
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -840,7 +840,7 @@ l:integer | first_letter:keyword
 ;
 valuesIntGroupedByOrdinals
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -864,7 +864,7 @@ required_feature: esql.agg_values
 ;
 valuesShort
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -876,7 +876,7 @@ required_feature: esql.agg_values
 ;
 valuesShortGrouped
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
@@ -897,7 +897,7 @@ l:integer | first_letter:keyword
 ;
 valuesShortGroupedByOrdinals
-required_feature: esql.agg_values
+required_capability: agg_values
 FROM employees
 | WHERE emp_no <= 10009
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec
index 8d3c0c9186c6c..ae683acbb2c3a 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec
@@ -16,7 +16,7 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece
 ;
 equals
-required_feature: esql.mv_warn
+required_capability: mv_warn
 from hosts | sort host, card | where ip0 == ip1 | keep card, host, ip0, ip1;
 warning:Line 1:38: evaluation of [ip0 == ip1] failed, treating result as null. Only first 20 failures recorded.
@@ -60,7 +60,7 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; lessThan -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | sort host, card, ip1 | where ip0 < ip1 | keep card, host, ip0, ip1; warning:Line 1:43: evaluation of [ip0 < ip1] failed, treating result as null. Only first 20 failures recorded. @@ -73,7 +73,7 @@ lo0 |gamma |fe80::cae2:65ff:fece:feb9|fe81::cae2:65ff:fece:f ; notEquals -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | sort host, card, ip1 | where ip0 != ip1 | keep card, host, ip0, ip1; warning:Line 1:43: evaluation of [ip0 != ip1] failed, treating result as null. Only first 20 failures recorded. @@ -125,7 +125,7 @@ null |[127.0.0.1, 127.0.0.2, 127.0.0.3] ; conditional -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | eval eq=case(ip0==ip1, ip0, ip1) | keep eq, ip0, ip1; ignoreOrder:true @@ -146,7 +146,7 @@ fe80::cae2:65ff:fece:fec1 |[fe80::cae2:65ff:fece:feb ; in -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | eval eq=case(ip0==ip1, ip0, ip1) | where eq in (ip0, ip1) | keep card, host, ip0, ip1, eq; ignoreOrder:true @@ -168,7 +168,7 @@ eth0 |epsilon |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece inWithWarningsRegex#[skip:-8.13.99, reason:regex warnings in tests introduced in v 8.14.0] -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | eval eq=case(ip0==ip1, ip0, ip1) | where eq in (ip0, ip1) | keep card, host, ip0, ip1, eq; ignoreOrder:true @@ -188,7 +188,7 @@ eth0 |epsilon |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece ; cidrMatchSimple -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where cidr_match(ip1, "127.0.0.2/32") | keep card, host, ip0, ip1; warning:Line 1:20: evaluation of [cidr_match(ip1, \"127.0.0.2/32\")] failed, treating result as null. Only first 20 failures recorded. 
@@ -199,7 +199,7 @@ eth1 |beta |127.0.0.1 |127.0.0.2 ; cidrMatchNullField -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where cidr_match(ip0, "127.0.0.2/32") is null | keep card, host, ip0, ip1; ignoreOrder:true @@ -213,7 +213,7 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; cdirMatchMultipleArgs -required_feature: esql.mv_warn +required_capability: mv_warn //tag::cdirMatchMultipleArgs[] FROM hosts @@ -233,7 +233,7 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 ; cidrMatchFunctionArg -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where cidr_match(ip1, concat("127.0.0.2", "/32"), "127.0.0.3/32") | keep card, host, ip0, ip1; ignoreOrder:true @@ -246,7 +246,7 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 ; cidrMatchFieldArg -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | eval cidr="127.0.0.2" | where cidr_match(ip1, cidr, "127.0.0.3/32") | keep card, host, ip0, ip1; ignoreOrder:true @@ -294,7 +294,7 @@ eth0 |beta |127.0.0.1 |::1 ; pushDownIPWithIn -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where ip1 in (to_ip("::1"), to_ip("127.0.0.1")) | keep card, host, ip0, ip1; ignoreOrder:true @@ -308,7 +308,7 @@ eth0 |beta |127.0.0.1 |::1 ; pushDownIPWithComparision -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where ip1 > to_ip("127.0.0.1") | keep card, ip1; ignoreOrder:true @@ -324,7 +324,7 @@ eth0 |fe80::cae2:65ff:fece:fec1 ; mvSort -required_feature: esql.mv_sort +required_capability: mv_sort FROM hosts | eval sd = mv_sort(ip1, "DESC"), sa = mv_sort(ip1) @@ -342,7 +342,7 @@ epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::c ; mvSlice -required_feature: esql.mv_sort +required_capability: mv_sort from hosts | where host == "epsilon" @@ -358,7 +358,7 @@ epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::c ; mvSlice -required_feature: esql.mv_sort +required_capability: mv_sort from hosts | where host == "epsilon" @@ -374,7 +374,7 @@ epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] | [fe81::c ; mvZip -required_feature: esql.mv_sort +required_capability: mv_sort from hosts | eval zip = mv_zip(to_string(description), to_string(ip0), "@@") @@ -392,7 +392,7 @@ epsilon | null | null ; values -required_feature: esql.agg_values +required_capability: agg_values FROM hosts | STATS ip0=MV_SORT(VALUES(ip0)) @@ -403,7 +403,7 @@ required_feature: esql.agg_values ; valuesGrouped -required_feature: esql.agg_values +required_capability: agg_values FROM hosts | EVAL host=SUBSTRING(host, 0, 1) @@ -419,7 +419,7 @@ fe80::cae2:65ff:fece:feb9 | g ; valuesGroupedByOrdinals -required_feature: esql.agg_values +required_capability: agg_values FROM hosts | STATS ip0=MV_SORT(VALUES(ip0)) BY host @@ -434,7 +434,7 @@ fe80::cae2:65ff:fece:feb9 | gamma ; implictCastingEqual -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from hosts | where mv_first(ip0) == "127.0.0.1" | keep host, ip0 | sort host; host:keyword | ip0:ip @@ -445,7 +445,7 @@ beta | 127.0.0.1 ; implictCastingNotEqual -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from hosts | where mv_first(ip0) != "127.0.0.1" | keep host, ip0 | sort host, ip0 | limit 3; host:keyword | ip0:ip @@ -455,7 +455,7 @@ epsilon | [fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece:fec0] ; implictCastingGreaterThan 
-required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from hosts | where mv_first(ip0) > "127.0.0.1" | keep host, ip0 | sort host, ip0 | limit 3; host:keyword | ip0:ip @@ -465,7 +465,7 @@ gamma | fe80::cae2:65ff:fece:feb9 ; implictCastingLessThanOrEqual -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from hosts | where mv_first(ip0) <= "127.0.0.1" | keep host, ip0 | sort host, ip0 | limit 3; host:keyword | ip0:ip @@ -475,7 +475,7 @@ beta | 127.0.0.1 ; implictCastingIn -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from hosts | where mv_first(ip0) in ( "127.0.0.1", "::1") | keep host, ip0 | sort host, ip0; host:keyword | ip0:ip diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec index e0604acbcce1d..4e080bac0ed2e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec @@ -201,7 +201,7 @@ height:double | s:double ; powSalarySquared -required_feature: esql.pow_double +required_capability: pow_double from employees | eval s = pow(to_long(salary) - 75000, 2) + 10000 | keep salary, s | sort salary desc | limit 4; @@ -618,7 +618,7 @@ base:double | exponent:integer | result:double ; powIntInt -required_feature: esql.pow_double +required_capability: pow_double ROW base = 2, exponent = 2 | EVAL s = POW(base, exponent) @@ -629,7 +629,7 @@ base:integer | exponent:integer | s:double ; powIntIntPlusInt -required_feature: esql.pow_double +required_capability: pow_double row s = 1 + pow(2, 2); @@ -645,7 +645,7 @@ s:double ; powIntUL -required_feature: esql.pow_double +required_capability: pow_double row x = pow(1, 9223372036854775808); @@ -654,7 +654,7 @@ x:double ; powLongUL -required_feature: esql.pow_double +required_capability: pow_double row x = to_long(1) | eval x = pow(x, 9223372036854775808); @@ -663,7 +663,7 @@ x:double ; powUnsignedLongUL -required_feature: esql.pow_double +required_capability: pow_double row x = to_ul(1) | eval x = pow(x, 9223372036854775808); @@ -688,7 +688,7 @@ null ; powULInt -required_feature: esql.pow_double +required_capability: pow_double row x = pow(to_unsigned_long(9223372036854775807), 1); @@ -697,7 +697,7 @@ x:double ; powULIntOverrun -required_feature: esql.pow_double +required_capability: pow_double ROW x = POW(9223372036854775808, 2) ; @@ -719,7 +719,7 @@ x:double ; powULLong -required_feature: esql.pow_double +required_capability: pow_double row x = to_long(10) | eval x = pow(to_unsigned_long(10), x); @@ -728,7 +728,7 @@ x:double ; powULLongOverrun -required_feature: esql.pow_double +required_capability: pow_double row x = to_long(100) | eval x = pow(to_unsigned_long(10), x); @@ -1414,7 +1414,7 @@ Anneke |Preusig |1.56 |1.56 ; evalAbsString -required_feature: esql.string_literal_auto_casting +required_capability: string_literal_auto_casting ROW number = -1.0 | EVAL abs_number = ABS("10.0") @@ -1425,7 +1425,7 @@ number:double | abs_number:double ; functionUnderArithmeticOperationAggString -required_feature: esql.string_literal_auto_casting +required_capability: string_literal_auto_casting ROW a = 1 | eval x = date_trunc(1 month, "2024-11-22") + 2 days, y = x + 3 days @@ -1437,7 +1437,7 @@ count():long | y:date ; functionUnderArithmeticOperationString 
-required_feature: esql.string_literal_auto_casting +required_capability: string_literal_auto_casting from employees | eval x = date_trunc(1 month, "2024-11-22") + 2 days, y = x + 3 days diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata.csv-spec index bcb9718048085..b4cd18f728858 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata.csv-spec @@ -1,5 +1,5 @@ simpleKeep -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | sort emp_no | limit 2 | keep emp_no, _index, _version; emp_no:integer |_index:keyword |_version:long @@ -8,7 +8,7 @@ emp_no:integer |_index:keyword |_version:long ; aliasWithSameName -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | sort emp_no | limit 2 | eval _index = _index, _version = _version | keep emp_no, _index, _version; emp_no:integer |_index:keyword |_version:long @@ -17,7 +17,7 @@ emp_no:integer |_index:keyword |_version:long ; inComparison -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | sort emp_no | where _index == "employees" | where _version == 1 | keep emp_no | limit 2; emp_no:integer @@ -26,7 +26,7 @@ emp_no:integer ; metaIndexInAggs -required_feature: esql.metadata_fields +required_capability: metadata_fields // tag::metaIndexInAggs[] FROM employees METADATA _index, _id | STATS max = MAX(emp_no) BY _index @@ -40,7 +40,7 @@ max:integer |_index:keyword ; metaIndexAliasedInAggs -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i; @@ -49,7 +49,7 @@ max:integer |_i:keyword ; metaVersionInAggs -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _version | stats min = min(emp_no) by _version; min:integer |_version:long @@ -57,7 +57,7 @@ min:integer |_version:long ; metaVersionAliasedInAggs -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _version | eval _v = _version | stats min = min(emp_no) by _v; min:integer |_v:long @@ -65,7 +65,7 @@ min:integer |_v:long ; inAggsAndAsGroups -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | stats max = max(_version) by _index; max:long |_index:keyword @@ -73,7 +73,7 @@ max:long |_index:keyword ; inAggsAndAsGroupsAliased -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | eval _i = _index, _v = _version | stats max = max(_v) by _i; max:long |_i:keyword @@ -81,7 +81,7 @@ max:long |_i:keyword ; inFunction -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | sort emp_no | where length(_index) == length("employees") | where abs(_version) == 1 | keep emp_no | limit 2; emp_no:integer @@ -90,7 +90,7 @@ emp_no:integer ; inArithmetics -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | eval i = _version + 2 | stats min = min(emp_no) by i; min:integer |i:long @@ -98,7 +98,7 @@ min:integer |i:long ; inSort -required_feature: esql.metadata_fields 
+required_capability: metadata_fields from employees metadata _index, _version | sort _version, _index, emp_no | keep emp_no, _version, _index | limit 2; emp_no:integer |_version:long |_index:keyword @@ -107,7 +107,7 @@ emp_no:integer |_version:long |_index:keyword ; withMvFunction -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _version | eval i = mv_avg(_version) + 2 | stats min = min(emp_no) by i; min:integer |i:double @@ -115,7 +115,7 @@ min:integer |i:double ; overwritten -required_feature: esql.metadata_fields +required_capability: metadata_fields from employees metadata _index, _version | sort emp_no | eval _index = 3, _version = "version" | keep emp_no, _index, _version | limit 3; emp_no:integer |_index:integer |_version:keyword @@ -125,7 +125,7 @@ emp_no:integer |_index:integer |_version:keyword ; multipleIndices -required_feature: esql.metadata_fields +required_capability: metadata_fields // tag::multipleIndices[] FROM ul_logs, apps METADATA _index, _version | WHERE id IN (13, 14) AND _version == 1 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec index 26fcca423d28d..6d6b3b0782a98 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial.csv-spec @@ -3,7 +3,7 @@ ############################################### convertFromStringQuantize -required_feature: esql.spatial_points +required_capability: spatial_points row wkt = "POINT(42.97109629958868 14.7552534006536)" | eval pt = to_geopoint(wkt); @@ -13,7 +13,7 @@ POINT(42.97109629958868 14.7552534006536) |POINT(42.97109629958868 14.7552534006 ; convertFromString -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_geopoint-str[] ROW wkt = "POINT(42.97109630194 14.7552534413725)" @@ -28,7 +28,7 @@ wkt:keyword |pt:geo_point ; convertFromStringArray -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source row wkt = ["POINT(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)"] | eval pt = to_geopoint(wkt); @@ -38,7 +38,7 @@ wkt:keyword ; centroidFromStringNested -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg row wkt = "POINT(42.97109629958868 14.7552534006536)" | STATS c = ST_CENTROID_AGG(TO_GEOPOINT(wkt)); @@ -48,7 +48,7 @@ POINT(42.97109629958868 14.7552534006536) ; centroidFromString1 -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg ROW wkt = ["POINT(42.97109629958868 14.7552534006536)"] | MV_EXPAND wkt @@ -60,7 +60,7 @@ POINT(42.97109629958868 14.7552534006536) ; centroidFromString2 -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg ROW wkt = ["POINT(42.97109629958868 14.7552534006536)", "POINT(75.80929149873555 22.72774917539209)"] | MV_EXPAND wkt @@ -72,7 +72,7 @@ POINT(59.390193899162114 18.741501288022846) ; centroidFromString3 -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg ROW wkt = ["POINT(42.97109629958868 14.7552534006536)", "POINT(75.80929149873555 22.72774917539209)", "POINT(-0.030548143003023033 24.37553649504829)"] | MV_EXPAND wkt @@ -84,7 +84,7 @@ POINT(39.58327988510707 20.619513023697994) ; centroidFromString4 -required_feature: esql.st_x_y +required_capability: st_x_y ROW wkt = ["POINT(42.97109629958868 14.7552534006536)", 
"POINT(75.80929149873555 22.72774917539209)", "POINT(-0.030548143003023033 24.37553649504829)"] | MV_EXPAND wkt @@ -97,7 +97,7 @@ POINT(39.58327988510707 20.619513023697994) | 39.58327988510707 | 20.61951302369 ; stXFromString -required_feature: esql.st_x_y +required_capability: st_x_y // tag::st_x_y[] ROW point = TO_GEOPOINT("POINT(42.97109629958868 14.7552534006536)") @@ -112,7 +112,7 @@ POINT(42.97109629958868 14.7552534006536) | 42.97109629958868 | 14.755253400653 ; simpleLoad -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source FROM airports | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; @@ -132,7 +132,7 @@ ZAH | Zāhedān | POINT(60.8628 29.4964) | Iran ; stXFromAirportsSupportsNull -required_feature: esql.st_x_y +required_capability: st_x_y FROM airports | EVAL x = FLOOR(ABS(ST_X(city_location))/200), y = FLOOR(ABS(ST_Y(city_location))/100) @@ -149,7 +149,7 @@ c:long | x:double | y:double # Tests for ST_CENTROID on GEO_POINT type centroidFromAirports -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg // tag::st_centroid_agg-airports[] FROM airports @@ -164,7 +164,7 @@ POINT(-0.030548143003023033 24.37553649504829) ; centroidFromAirportsNested -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS centroid=ST_CENTROID_AGG(TO_GEOPOINT(location)) @@ -175,7 +175,7 @@ POINT (-0.03054810272375508 24.37553651570554) ; centroidFromAirportsCount -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS centroid=ST_CENTROID_AGG(location), count=COUNT() @@ -186,7 +186,7 @@ POINT(-0.030548143003023033 24.37553649504829) | 891 ; centroidFromAirportsCountGrouped -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS centroid=ST_CENTROID_AGG(location), count=COUNT() BY scalerank @@ -205,7 +205,7 @@ POINT(1.2588642098541771 24.379140841774642) | 63 | 2 ; centroidFromAirportsFiltered -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | WHERE scalerank == 9 @@ -217,7 +217,7 @@ POINT(83.27726172452623 28.99289782286029) | 33 ; centroidFromAirportsCountGroupedCentroid -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS centroid=ST_CENTROID_AGG(location), count=COUNT() BY scalerank @@ -229,7 +229,7 @@ POINT (7.572387259169772 26.836561792945492) | 891 ; centroidFromAirportsCountCityLocations -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS centroid=ST_CENTROID_AGG(city_location), count=COUNT() @@ -240,7 +240,7 @@ POINT (1.3965610809060276 24.127649406297987) | 891 ; centroidFromAirportsCountGroupedCountry -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS centroid=ST_CENTROID_AGG(city_location), count=COUNT() BY country @@ -269,7 +269,7 @@ POINT (70.7946499697864 30.69746997440234) | 10 | Pakistan ; centroidFromAirportsFilteredCountry -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | WHERE country == "United States" @@ -281,7 +281,7 @@ POINT (-97.3333946136801 38.07953176370194) | 129 ; centroidFromAirportsCountGroupedCountryCentroid -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS centroid=ST_CENTROID_AGG(city_location), count=COUNT() BY country @@ -293,7 +293,7 @@ POINT (17.55538044598613 
18.185558743854063) | 891 ; centroidFromAirportsCountryCount -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | STATS airports=ST_CENTROID_AGG(location), cities=ST_CENTROID_AGG(city_location), count=COUNT() @@ -304,7 +304,7 @@ POINT(-0.030548143003023033 24.37553649504829) | POINT (1.3965610809060276 24.12 ; centroidFromAirportsFilteredAndSorted -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | WHERE scalerank == 9 @@ -318,7 +318,7 @@ POINT(78.73736493755132 26.761841227998957) | 12 ; centroidFromAirportsAfterMvExpand -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | MV_EXPAND type @@ -330,7 +330,7 @@ POINT(2.121611400672094 24.559172889205755) | 933 ; centroidFromAirportsGroupedAfterMvExpand -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | MV_EXPAND type @@ -350,7 +350,7 @@ POINT(1.2588642098541771 24.379140841774642) | 63 | 2 ; centroidFromAirportsGroupedAfterMvExpandFiltered -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | WHERE scalerank == 9 @@ -363,7 +363,7 @@ POINT(83.16847535921261 28.79002037679311) | 40 | 9 ; centroidFromAirportsAfterMvExpandFiltered -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | WHERE scalerank == 9 @@ -376,7 +376,7 @@ POINT(83.16847535921261 28.79002037679311) | 40 ; centroidFromAirportsAfterKeywordPredicateCountryUK -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports | WHERE country == "United Kingdom" @@ -388,7 +388,7 @@ POINT (-2.597342072712148 54.33551226578214) | 17 ; centroidFromAirportsAfterIntersectsPredicateCountryUK -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | WHERE ST_INTERSECTS(location, TO_GEOSHAPE("POLYGON((1.2305 60.8449, -1.582 61.6899, -10.7227 58.4017, -7.1191 55.3291, -7.9102 54.2139, -5.4492 54.0078, -5.2734 52.3756, -7.8223 49.6676, -5.0977 49.2678, 0.9668 50.5134, 2.5488 52.1065, 2.6367 54.0078, -0.9668 56.4625, 1.2305 60.8449))")) @@ -400,7 +400,7 @@ POINT (-2.597342072712148 54.33551226578214) | 17 ; centroidFromAirportsAfterContainsPredicateCountryUK -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports | WHERE ST_CONTAINS(TO_GEOSHAPE("POLYGON((1.2305 60.8449, -1.582 61.6899, -10.7227 58.4017, -7.1191 55.3291, -7.9102 54.2139, -5.4492 54.0078, -5.2734 52.3756, -7.8223 49.6676, -5.0977 49.2678, 0.9668 50.5134, 2.5488 52.1065, 2.6367 54.0078, -0.9668 56.4625, 1.2305 60.8449))"), location) @@ -412,7 +412,7 @@ POINT (-2.597342072712148 54.33551226578214) | 17 ; centroidFromAirportsAfterWithinPredicateCountryUK -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports | WHERE ST_WITHIN(location, TO_GEOSHAPE("POLYGON((1.2305 60.8449, -1.582 61.6899, -10.7227 58.4017, -7.1191 55.3291, -7.9102 54.2139, -5.4492 54.0078, -5.2734 52.3756, -7.8223 49.6676, -5.0977 49.2678, 0.9668 50.5134, 2.5488 52.1065, 2.6367 54.0078, -0.9668 56.4625, 1.2305 60.8449))")) @@ -424,7 +424,7 @@ POINT (-2.597342072712148 54.33551226578214) | 17 ; intersectsAfterCentroidFromAirportsAfterKeywordPredicateCountryUK -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | WHERE country == "United Kingdom" @@ -443,7 +443,7 @@ POINT (-2.597342072712148 54.33551226578214) | 17 | true ; 
centroidFromAirportsAfterIntersectsEvalExpression -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | EVAL in_uk = ST_INTERSECTS(location, TO_GEOSHAPE("POLYGON((1.2305 60.8449, -1.582 61.6899, -10.7227 58.4017, -7.1191 55.3291, -7.9102 54.2139, -5.4492 54.0078, -5.2734 52.3756, -7.8223 49.6676, -5.0977 49.2678, 0.9668 50.5134, 2.5488 52.1065, 2.6367 54.0078, -0.9668 56.4625, 1.2305 60.8449))")) @@ -461,7 +461,7 @@ POINT (0.04453958108176276 23.74658354606057) | 873 | false ; centroidFromAirportsAfterIntersectsPredicate -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | WHERE ST_INTERSECTS(location, TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))")) @@ -473,7 +473,7 @@ POINT (42.97109629958868 14.7552534006536) | 1 ; centroidFromAirportsAfterIntersectsCompoundPredicate -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | WHERE scalerank == 9 AND ST_INTERSECTS(location, TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))")) AND country == "Yemen" @@ -488,7 +488,7 @@ POINT (42.97109629958868 14.7552534006536) | 1 # Tests for ST_INTERSECTS on GEO_POINT type pointIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects // tag::st_intersects-airports[] FROM airports @@ -503,7 +503,7 @@ HOD | Al Ḩudaydah | POINT(42.9511 14.8022) | Yemen ; pointIntersectsLiteralPolygonReversed -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | WHERE ST_INTERSECTS(TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))"), location) @@ -514,7 +514,7 @@ HOD | Al Ḩudaydah | POINT(42.9511 14.8022) | Yemen ; literalPointIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -528,7 +528,7 @@ wkt:keyword | pt:geo_point ; literalPointIntersectsLiteralPolygonReversed -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -542,7 +542,7 @@ wkt:keyword | pt:geo_point ; literalPointIntersectsLiteralPolygonOneRow -required_feature: esql.st_intersects +required_capability: st_intersects ROW intersects = ST_INTERSECTS(TO_GEOPOINT("POINT(0 0)"), TO_GEOSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))")) ; @@ -552,7 +552,7 @@ true ; cityInCityBoundary -required_feature: esql.st_intersects +required_capability: st_intersects FROM airport_city_boundaries | EVAL in_city = ST_INTERSECTS(city_location, city_boundary) @@ -568,7 +568,7 @@ cardinality:k | in_city:boolean ; cityNotInCityBoundaryBiggest -required_feature: esql.st_intersects +required_capability: st_intersects FROM airport_city_boundaries | WHERE NOT ST_INTERSECTS(city_location, city_boundary) @@ -583,7 +583,7 @@ SYX | Sanya Phoenix Int'l | Sanya | POINT(109.5036 18.253 ; airportCityLocationPointIntersection -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_mp | WHERE ST_INTERSECTS(location, city_location) @@ -594,7 +594,7 @@ XXX | Atlantis | POINT(0 0) | Atlantis ; airportCityLocationPointIntersectionCentroid -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_mp | WHERE ST_INTERSECTS(location, city_location) @@ -609,7 +609,7 @@ POINT (0 0) | POINT (0 0) | 1 # Tests for ST_DISJOINT on GEO_POINT type literalPolygonDisjointLiteralPoint -required_feature: 
esql.st_disjoint +required_capability: st_disjoint ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -623,7 +623,7 @@ wkt:keyword | pt:geo_point ; literalPointDisjointLiteralPolygon -required_feature: esql.st_disjoint +required_capability: st_disjoint ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -637,7 +637,7 @@ wkt:keyword | pt:geo_point ; literalPolygonDisjointLiteralPointOneRow -required_feature: esql.st_disjoint +required_capability: st_disjoint ROW disjoint = ST_DISJOINT(TO_GEOSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))"), TO_GEOPOINT("POINT(0 0)")) ; @@ -647,7 +647,7 @@ false ; literalPointDisjointLiteralPolygonOneRow -required_feature: esql.st_disjoint +required_capability: st_disjoint ROW disjoint = ST_DISJOINT(TO_GEOPOINT("POINT(-1 0)"), TO_GEOSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))")) ; @@ -657,7 +657,7 @@ true ; pointDisjointLiteralPolygon -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM airports | WHERE ST_DISJOINT(location, TO_GEOSHAPE("POLYGON((-10 -60, 120 -60, 120 60, -10 60, -10 -60))")) @@ -679,7 +679,7 @@ x:double | y:double | count:long ; airportCityLocationPointDisjointCentroid -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM airports_mp | WHERE ST_DISJOINT(location, city_location) @@ -694,7 +694,7 @@ POINT (67.8581917192787 24.02956652920693) | POINT (67.81638333333332 24.0489999 # Tests for ST_CONTAINS on GEO_POINT type literalPolygonContainsLiteralPoint -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -708,7 +708,7 @@ wkt:keyword | pt:geo_point ; literalPointDoesNotContainLiteralPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -720,7 +720,7 @@ wkt:keyword | pt:geo_point ; literalPolygonContainsLiteralPointOneRow -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW contains = ST_CONTAINS(TO_GEOSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))"), TO_GEOPOINT("POINT(0 0)")) ; @@ -730,7 +730,7 @@ true ; literalPointDoesNotContainLiteralPolygonOneRow -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW contains = ST_CONTAINS(TO_GEOPOINT("POINT(0 0)"), TO_GEOSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))")) ; @@ -740,7 +740,7 @@ false ; pointContainsLiteralPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports | WHERE ST_CONTAINS(location, TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))")) @@ -750,7 +750,7 @@ abbrev:keyword | city:keyword | city_location:geo_point | country:keyword ; pointContainedInLiteralPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports | WHERE ST_CONTAINS(TO_GEOSHAPE("POLYGON((42 14, 43 14, 43 15, 42 15, 42 14))"), location) @@ -761,7 +761,7 @@ HOD | Al Ḩudaydah | POINT(42.9511 14.8022) | Yemen ; airportCityLocationPointContains -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_mp | WHERE ST_CONTAINS(location, city_location) @@ -772,7 +772,7 @@ XXX | Atlantis | POINT(0 0) | Atlantis ; airportCityLocationPointContainsCentroid -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_mp | WHERE 
ST_CONTAINS(location, city_location) @@ -787,7 +787,7 @@ POINT (0 0) | POINT (0 0) | 1 # Tests for ST_WITHIN on GEO_POINT type literalPolygonNotWithinLiteralPoint -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -799,7 +799,7 @@ wkt:keyword | pt:geo_point ; literalPointWithinLiteralPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -813,7 +813,7 @@ wkt:keyword | pt:geo_point ; literalPolygonNotWithinLiteralPointOneRow -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW within = ST_WITHIN(TO_GEOSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))"), TO_GEOPOINT("POINT(0 0)")) ; @@ -823,7 +823,7 @@ false ; literalPointWithinLiteralPolygonOneRow -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW within = ST_WITHIN(TO_GEOPOINT("POINT(0 0)"), TO_GEOSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))")) ; @@ -833,7 +833,7 @@ true ; pointWithinLiteralPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within // tag::st_within-airports[] FROM airports @@ -848,7 +848,7 @@ HOD | Al Ḩudaydah | POINT(42.9511 14.8022) | Yemen ; airportCityLocationPointWithin -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_mp | WHERE ST_WITHIN(location, city_location) @@ -859,7 +859,7 @@ XXX | Atlantis | POINT(0 0) | Atlantis ; airportCityLocationPointWithinCentroid -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_mp | WHERE ST_WITHIN(location, city_location) @@ -874,7 +874,7 @@ POINT (0 0) | POINT (0 0) | 1 # Tests for Equality and casting with GEO_POINT geoPointEquals -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_geopoint-equals[] ROW wkt = ["POINT(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)"] @@ -891,7 +891,7 @@ wkt:keyword |pt:geo_point ; geoPointNotEquals -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_geopoint-not-equals[] ROW wkt = ["POINT(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)"] @@ -908,7 +908,7 @@ wkt:keyword |pt:geo_point ; convertFromStringParseError -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_geopoint-str-parse-error[] row wkt = ["POINTX(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)", "POINT(111)"] @@ -936,7 +936,7 @@ wkt:keyword |pt:geo_point ############################################### convertCartesianFromString -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_cartesianpoint-str[] ROW wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] @@ -953,7 +953,7 @@ wkt:keyword |pt:cartesian_point ; convertCartesianFromStringArray -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source row wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] | eval pt = to_cartesianpoint(wkt); @@ -963,7 +963,7 @@ wkt:keyword |pt:cartesian_point ; centroidCartesianFromStringNested -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg row wkt = 
"POINT(4297.10986328125 -1475.530029296875)" | STATS c = ST_CENTROID_AGG(TO_CARTESIANPOINT(wkt)); @@ -973,7 +973,7 @@ POINT(4297.10986328125 -1475.530029296875) ; centroidFromCartesianString1 -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg ROW wkt = ["POINT(4297.10986328125 -1475.530029296875)"] | MV_EXPAND wkt @@ -985,7 +985,7 @@ POINT(4297.10986328125 -1475.530029296875) ; centroidFromCartesianString2 -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg ROW wkt = ["POINT(4297.10986328125 -1475.530029296875)", "POINT(7580.93017578125 2272.77001953125)"] | MV_EXPAND wkt @@ -997,7 +997,7 @@ POINT(5939.02001953125 398.6199951171875) ; centroidFromCartesianString3 -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg ROW wkt = ["POINT(4297.10986328125 -1475.530029296875)", "POINT(7580.93017578125 2272.77001953125)", "POINT(-30.548143003023033 2437.553649504829)"] | MV_EXPAND wkt @@ -1009,7 +1009,7 @@ POINT(3949.163965353159 1078.2645465797348) ; stXFromCartesianString -required_feature: esql.st_x_y +required_capability: st_x_y ROW point = TO_CARTESIANPOINT("POINT(4297.10986328125 -1475.530029296875)") | EVAL x = ST_X(point), y = ST_Y(point) @@ -1020,7 +1020,7 @@ POINT(4297.10986328125 -1475.530029296875) | 4297.10986328125 | -1475.530029296 ; simpleCartesianLoad -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source FROM airports_web | WHERE scalerank == 9 | SORT abbrev | WHERE length(name) > 12; @@ -1039,7 +1039,7 @@ ZAH | POINT (6779435.866395892 3436280.545331025) | Zahedan Int'l # Tests for ST_CENTROID on CARTESIAN_POINT type cartesianCentroidFromAirports -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports_web | STATS centroid=ST_CENTROID_AGG(location); @@ -1049,7 +1049,7 @@ POINT(-266681.67563861894 3053301.5120195406) ; cartesianCentroidFromAirportsNested -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports_web | STATS centroid=ST_CENTROID_AGG(TO_CARTESIANPOINT(location)); @@ -1059,7 +1059,7 @@ POINT (-266681.66530554957 3053301.506061676) ; cartesianCentroidFromAirportsCount -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports_web | STATS centroid=ST_CENTROID_AGG(location), count=COUNT() @@ -1070,7 +1070,7 @@ POINT(-266681.67563861894 3053301.5120195406) | 849 ; cartesianCentroidFromAirportsCountGrouped -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports_web | STATS centroid=ST_CENTROID_AGG(location), count=COUNT() BY scalerank @@ -1089,7 +1089,7 @@ POINT(140136.12878224207 3081220.7881944445) | 63 | 2 ; cartesianCentroidFromAirportsFiltered -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports_web | WHERE scalerank == 9 @@ -1101,7 +1101,7 @@ POINT(9289013.153846154 3615537.0533353365) | 26 ; cartesianCentroidFromAirportsFilteredAndSorted -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports_web | WHERE scalerank == 9 @@ -1115,7 +1115,7 @@ POINT(9003597.4375 3429344.0078125) | 8 ; cartesianCentroidFromAirportsCountGroupedCentroid -required_feature: esql.st_centroid_agg +required_capability: st_centroid_agg FROM airports_web | STATS centroid=ST_CENTROID_AGG(location), count=COUNT() BY scalerank @@ -1130,7 +1130,7 @@ POINT (726480.0130685265 3359566.331716279) | 849 # Tests for ST_INTERSECTS on CARTESIAN_POINT type 
cartesianCentroidFromAirportsAfterIntersectsPredicate -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))")) @@ -1142,7 +1142,7 @@ POINT (4783520.5 1661010.0) | 1 ; cartesianPointIntersectsPolygon -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))")) @@ -1153,7 +1153,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; literalCartesianPointIntersectsPolygon -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -1167,7 +1167,7 @@ wkt:keyword | pt:cartesian_point ; cartesianPointIntersectsPointShape -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("POINT(4783520.559160681 1661010.0197476079)")) @@ -1178,7 +1178,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; cartesianPointIntersectsPoint -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANPOINT("POINT(4783520.559160681 1661010.0197476079)")) @@ -1189,7 +1189,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; cartesianPointIntersectsMultiPoint -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("MULTIPOINT(4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096)")) @@ -1202,7 +1202,7 @@ CPH | POINT (1408119.2975413958 7484813.53657096) | Copenhagen | ; cartesianPointIntersectsLineString -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("LINESTRING(4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096)")) @@ -1215,7 +1215,7 @@ CPH | POINT (1408119.2975413958 7484813.53657096) | Copenhagen | ; cartesianPointIntersectsMultiLineString -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("MULTILINESTRING((4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096),(1408119.2975413958 7484813.53657096, 1996039.722208033 8322469.9470024165))")) @@ -1229,7 +1229,7 @@ ARN | POINT(1996039.722208033 8322469.9470024165) | Arlanda | ; cartesianPointIntersectsPointShapeWithCentroid -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("POINT(4783520.559160681 1661010.0197476079)")) @@ -1241,7 +1241,7 @@ POINT (4783520.5 1661010.0) | 1 ; cartesianPointIntersectsPointWithCentroid -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANPOINT("POINT(4783520.559160681 1661010.0197476079)")) @@ -1253,7 +1253,7 @@ POINT (4783520.5 1661010.0) | 1 ; cartesianPointIntersectsLiteralPolygonCount -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_INTERSECTS(location, TO_CARTESIANSHAPE("POLYGON((0 -60000000, 
120000000 -60000000, 120000000 60000000, 0 60000000, 0 -60000000))")) @@ -1268,7 +1268,7 @@ count:long # Tests for ST_DISJOINT on CARTESIAN_POINT type literalPolygonDisjointLiteralCartesianPoint -required_feature: esql.st_disjoint +required_capability: st_disjoint ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -1282,7 +1282,7 @@ wkt:keyword | pt:cartesian_point ; literalCartesianPointDisjointLiteralPolygon -required_feature: esql.st_disjoint +required_capability: st_disjoint ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -1296,7 +1296,7 @@ wkt:keyword | pt:cartesian_point ; literalPolygonDisjointLiteralCartesianPointOneRow -required_feature: esql.st_disjoint +required_capability: st_disjoint ROW disjoint = ST_DISJOINT(TO_CARTESIANSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))"), TO_CARTESIANPOINT("POINT(0 0)")) ; @@ -1306,7 +1306,7 @@ false ; literalCartesianPointDisjointLiteralPolygonOneRow -required_feature: esql.st_disjoint +required_capability: st_disjoint ROW disjoint = ST_DISJOINT(TO_CARTESIANPOINT("POINT(-1 0)"), TO_CARTESIANSHAPE("POLYGON((0 -1, 1 -1, 1 1, 0 1, 0 -1))")) ; @@ -1316,7 +1316,7 @@ true ; cartesianPointDisjointLiteralPolygonCount -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM airports_web | WHERE ST_DISJOINT(location, TO_CARTESIANSHAPE("POLYGON((0 -60000000, 120000000 -60000000, 120000000 60000000, 0 60000000, 0 -60000000))")) @@ -1328,7 +1328,7 @@ count:long ; cartesianPointIntersectsDisjointLiteralPolygonCount -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM airports_web | EVAL intersects = ST_INTERSECTS(location, TO_CARTESIANSHAPE("POLYGON((0 -60000000, 120000000 -60000000, 120000000 60000000, 0 60000000, 0 -60000000))")) @@ -1344,7 +1344,7 @@ false | true | 405 ; cartesianPointDisjointLiteralPolygon -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM airports_web | WHERE ST_DISJOINT(location, TO_CARTESIANSHAPE("POLYGON((0 -60000000, 120000000 -60000000, 120000000 60000000, 0 60000000, 0 -60000000))")) @@ -1365,7 +1365,7 @@ x:double | y:double | count:long ; cartesianPointDisjointEmptyGeometry -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM airports_web | WHERE ST_DISJOINT(location, TO_CARTESIANSHAPE("LINESTRING()")) @@ -1380,7 +1380,7 @@ count:long ; cartesianPointDisjointInvalidGeometry -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM airports_web | WHERE ST_DISJOINT(location, TO_CARTESIANSHAPE("Invalid Geometry")) @@ -1398,7 +1398,7 @@ count:long # Tests for ST_CONTAINS on CARTESIAN_POINT type cartesianCentroidFromAirportsAfterPolygonContainsPointPredicate -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_web | WHERE ST_CONTAINS(TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))"), location) @@ -1410,7 +1410,7 @@ POINT (4783520.5 1661010.0) | 1 ; cartesianPolygonContainsPointPredicate -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_web | WHERE ST_CONTAINS(TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))"), location) @@ -1421,7 +1421,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; literalCartesianPolygonContainsPointPredicate -required_feature: esql.st_contains_within +required_capability: 
st_contains_within ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -1435,7 +1435,7 @@ wkt:keyword | pt:cartesian_point ; cartesianCentroidFromAirportsAfterPointContainsPolygonPredicate -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))")) @@ -1447,7 +1447,7 @@ POINT (NaN NaN) | 0 ; cartesianPointContainsPolygonPredicate -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))")) @@ -1457,7 +1457,7 @@ abbrev:keyword | location:cartesian_point | name:text | ; literalCartesianPointContainsPolygonPredicate -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -1469,7 +1469,7 @@ wkt:keyword | pt:cartesian_point ; cartesianPointContainsPointShape -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("POINT(4783520.559160681 1661010.0197476079)")) @@ -1480,7 +1480,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; cartesianPointContainsPoint -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANPOINT("POINT(4783520.559160681 1661010.0197476079)")) @@ -1491,7 +1491,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; cartesianPointContainsMultiPoint -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("MULTIPOINT(4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096)")) @@ -1502,7 +1502,7 @@ abbrev:keyword | location:cartesian_point | name:text | ; cartesianPointContainsLineString -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("LINESTRING(4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096)")) @@ -1513,7 +1513,7 @@ abbrev:keyword | location:cartesian_point | name:text | ; cartesianPointContainsMultiLineString -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("MULTILINESTRING((4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096),(1408119.2975413958 7484813.53657096, 1996039.722208033 8322469.9470024165))")) @@ -1524,7 +1524,7 @@ abbrev:keyword | location:cartesian_point | name:text | ; cartesianPointContainsPointShapeWithCentroid -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANSHAPE("POINT(4783520.559160681 1661010.0197476079)")) @@ -1536,7 +1536,7 @@ POINT (4783520.5 1661010.0) | 1 ; cartesianPointContainsPointWithCentroid -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_CONTAINS(location, TO_CARTESIANPOINT("POINT(4783520.559160681 1661010.0197476079)")) @@ -1551,7 +1551,7 @@ POINT (4783520.5 1661010.0) | 1 # Tests for ST_WITHIN on CARTESIAN_POINT type 
cartesianCentroidFromAirportsAfterWithinPredicate -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))")) @@ -1563,7 +1563,7 @@ POINT (4783520.5 1661010.0) | 1 ; cartesianPointWithinPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANSHAPE("POLYGON((4700000 1600000, 4800000 1600000, 4800000 1700000, 4700000 1700000, 4700000 1600000))")) @@ -1574,7 +1574,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; literalCartesianPointWithinPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -1588,7 +1588,7 @@ wkt:keyword | pt:cartesian_point ; cartesianPointWithinPointShape -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANSHAPE("POINT(4783520.559160681 1661010.0197476079)")) @@ -1599,7 +1599,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; cartesianPointWithinPoint -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANPOINT("POINT(4783520.559160681 1661010.0197476079)")) @@ -1610,7 +1610,7 @@ HOD | POINT (4783520.559160681 1661010.0197476079) | Hodeidah Int'l | ; cartesianPointWithinMultiPoint -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANSHAPE("MULTIPOINT(4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096)")) @@ -1623,7 +1623,7 @@ CPH | POINT (1408119.2975413958 7484813.53657096) | Copenhagen | ; cartesianPointWithinLineString -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANSHAPE("LINESTRING(4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096)")) @@ -1636,7 +1636,7 @@ CPH | POINT (1408119.2975413958 7484813.53657096) | Copenhagen | ; cartesianPointWithinMultiLineString -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANSHAPE("MULTILINESTRING((4783520.559160681 1661010.0197476079, 1408119.2975413958 7484813.53657096),(1408119.2975413958 7484813.53657096, 1996039.722208033 8322469.9470024165))")) @@ -1650,7 +1650,7 @@ ARN | POINT(1996039.722208033 8322469.9470024165) | Arlanda | ; cartesianPointWithinPointShapeWithCentroid -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANSHAPE("POINT(4783520.559160681 1661010.0197476079)")) @@ -1662,7 +1662,7 @@ POINT (4783520.5 1661010.0) | 1 ; cartesianPointWithinPointWithCentroid -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | WHERE ST_WITHIN(location, TO_CARTESIANPOINT("POINT(4783520.559160681 1661010.0197476079)")) @@ -1677,7 +1677,7 @@ POINT (4783520.5 1661010.0) | 1 # Tests for Equality and casting with GEO_POINT cartesianPointEquals -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_cartesianpoint-equals[] ROW wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 
2272.77)"] @@ -1694,7 +1694,7 @@ wkt:keyword |pt:cartesian_point ; cartesianPointNotEquals -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_cartesianpoint-not-equals[] ROW wkt = ["POINT(4297.11 -1475.53)", "POINT(7580.93 2272.77)"] @@ -1711,7 +1711,7 @@ wkt:keyword |pt:cartesian_point ; convertCartesianFromStringParseError -required_feature: esql.spatial_points_from_source +required_capability: spatial_points_from_source // tag::to_cartesianpoint-str-parse-error[] row wkt = ["POINTX(4297.11 -1475.53)", "POINT(7580.93 2272.77)", "POINT(111)"] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec index 6d0d15c398986..dd092130c3406 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec @@ -3,7 +3,7 @@ # convertFromString -required_feature: esql.spatial_shapes +required_capability: spatial_shapes // tag::to_geoshape-str[] ROW wkt = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" @@ -18,7 +18,7 @@ wkt:keyword | geom:geo_shape ; convertFromStringArray -required_feature: esql.spatial_shapes +required_capability: spatial_shapes row wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] | eval pt = to_geoshape(wkt); @@ -28,7 +28,7 @@ wkt:keyword ; convertFromStringViaPoint -required_feature: esql.spatial_shapes +required_capability: spatial_shapes ROW wkt = "POINT (30 10)" | EVAL point = TO_GEOPOINT(wkt) @@ -41,7 +41,7 @@ wkt:keyword | point:geo_point | shape:geo_shape # need to work out how to upload WKT simpleLoad -required_feature: esql.spatial_shapes +required_capability: spatial_shapes FROM countries_bbox | WHERE id == "ISL"; @@ -50,7 +50,7 @@ ISL|Iceland|BBOX(-24.538400, -13.499446, 66.536100, 63.390000) ; simpleLoadPointsAsShapes -required_feature: esql.spatial_shapes +required_capability: spatial_shapes FROM airports | WHERE abbrev == "CPH" OR abbrev == "VLC" @@ -80,7 +80,7 @@ CPH | Københavns Kommune | POINT(12.5683 55.6761) | Copenhagen # Tests for ST_INTERSECTS with GEO_SHAPE pointIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | EVAL location = TO_GEOSHAPE(location) @@ -93,7 +93,7 @@ HOD | Hodeidah Int'l | POINT(42.97109630194 14.7552534413725) | Yemen ; polygonIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects FROM airport_city_boundaries | WHERE ST_INTERSECTS(city_boundary, TO_GEOSHAPE("POLYGON((109.4 18.1, 109.6 18.1, 109.6 18.3, 109.4 18.3, 109.4 18.1))")) @@ -106,7 +106,7 @@ SYX | Sanya Phoenix Int'l | 天涯区 | Sanya | POINT(1 ; pointIntersectsLiteralPolygonReversed -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports | EVAL location = TO_GEOSHAPE(location) @@ -119,7 +119,7 @@ HOD | Hodeidah Int'l | POINT(42.97109630194 14.7552534413725) | Yemen ; literalPointIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -133,7 +133,7 @@ wkt:keyword | pt:geo_point ; literalPointIntersectsLiteralPolygonReversed -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -147,7 
+147,7 @@ wkt:keyword | pt:geo_point ; literalPointAsShapeIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -161,7 +161,7 @@ wkt:keyword | pt:geo_shape ; literalPointAsShapeIntersectsLiteralPolygonReversed -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -175,7 +175,7 @@ wkt:keyword | pt:geo_shape ; shapeIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects FROM countries_bbox | WHERE ST_INTERSECTS(shape, TO_GEOSHAPE("POLYGON((29 -30, 31 -30, 31 -27.3, 29 -27.3, 29 -30))")) @@ -189,7 +189,7 @@ LSO | Lesotho | BBOX(27.013973, 29.455554, -28.570691, -30.650527) ; literalPolygonIntersectsLiteralPolygon -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POLYGON((-20 60, -6 60, -6 66, -20 66, -20 60))", "POLYGON((20 60, 6 60, 6 66, 20 66, 20 60))"] | EVAL other = TO_GEOSHAPE("POLYGON((-15 64, -10 64, -10 66, -15 66, -15 64))") @@ -204,7 +204,7 @@ wkt:keyword | shape:geo_shape ; literalPolygonIntersectsLiteralPolygonOneRow -required_feature: esql.st_intersects +required_capability: st_intersects ROW intersects = ST_INTERSECTS(TO_GEOSHAPE("POLYGON((-20 60, -6 60, -6 66, -20 66, -20 60))"), TO_GEOSHAPE("POLYGON((-15 64, -10 64, -10 66, -15 66, -15 64))")) ; @@ -217,7 +217,7 @@ true # Tests for ST_DISJOINT with GEO_SHAPE polygonDisjointLiteralPolygon -required_feature: esql.st_disjoint +required_capability: st_disjoint // tag::st_disjoint-airport_city_boundaries[] FROM airport_city_boundaries @@ -238,7 +238,7 @@ ACA | General Juan N Alvarez Int'l | Acapulco de Juárez | Acapulco d # Tests for ST_CONTAINS and ST_WITHIN with GEO_SHAPE polygonContainsLiteralPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within // tag::st_contains-airport_city_boundaries[] FROM airport_city_boundaries @@ -255,7 +255,7 @@ SYX | Sanya Phoenix Int'l | 天涯区 | Sanya | POINT(1 ; polygonWithinLiteralPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within // tag::st_within-airport_city_boundaries[] FROM airport_city_boundaries @@ -275,7 +275,7 @@ SYX | Sanya Phoenix Int'l | 天涯区 | Sanya | POINT(1 # Tests for Equality and casting with GEO_SHAPE geo_shapeEquals -required_feature: esql.spatial_shapes +required_capability: spatial_shapes ROW wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] | MV_EXPAND wkt @@ -288,7 +288,7 @@ wkt:keyword |pt:geo_shape ; geo_shapeNotEquals -required_feature: esql.spatial_shapes +required_capability: spatial_shapes ROW wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] | MV_EXPAND wkt @@ -301,7 +301,7 @@ wkt:keyword |pt:geo_shape ; convertFromStringParseError -required_feature: esql.spatial_shapes +required_capability: spatial_shapes row wkt = ["POINTX(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)", "POINT(111)"] | mv_expand wkt @@ -323,7 +323,7 @@ wkt:keyword |pt:geo_shape # convertCartesianShapeFromString -required_feature: esql.spatial_shapes +required_capability: spatial_shapes // tag::to_cartesianshape-str[] ROW wkt = ["POINT(4297.11 -1475.53)", "POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))"] 
@@ -340,7 +340,7 @@ wkt:keyword |geom:cartesian_shape ; convertCartesianFromStringArray -required_feature: esql.spatial_shapes +required_capability: spatial_shapes row wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] | eval pt = to_cartesianshape(wkt); @@ -350,7 +350,7 @@ wkt:keyword ; convertCartesianFromStringViaPoint -required_feature: esql.spatial_shapes +required_capability: spatial_shapes ROW wkt = "POINT (3010 -1010)" | EVAL point = TO_CARTESIANPOINT(wkt) @@ -363,7 +363,7 @@ wkt:keyword | point:cartesian_point | shape:cartesian_shape # need to work out how to upload WKT simpleCartesianShapeLoad -required_feature: esql.spatial_shapes +required_capability: spatial_shapes FROM countries_bbox_web | WHERE id == "ISL"; @@ -372,7 +372,7 @@ ISL|Iceland|BBOX(-2731602.192501422, -1502751.454502109, 1.0025136653899286E7, 9 ; simpleLoadCartesianPointsAsShapes -required_feature: esql.spatial_shapes +required_capability: spatial_shapes FROM airports_web | WHERE abbrev == "CPH" OR abbrev == "VLC" @@ -389,7 +389,7 @@ abbrev:keyword | name:text | scalerank:integer | type:keyword | location:cart # Tests for ST_INTERSECTS with CARTESIAN_SHAPE cartesianPointIntersectsPolygon -required_feature: esql.st_intersects +required_capability: st_intersects FROM airports_web | EVAL location = TO_CARTESIANSHAPE(location) @@ -402,7 +402,7 @@ HOD | Hodeidah Int'l | POINT (4783520.559160681 1661010.0197476079) | ; literalCartesianPointIntersectsPolygon -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POINT(1 1)", "POINT(-1 -1)", "POINT(-1 1)", "POINT(1 -1)"] | MV_EXPAND wkt @@ -416,7 +416,7 @@ wkt:keyword | pt:cartesian_shape ; cartesianShapeIntersectsPolygon -required_feature: esql.st_intersects +required_capability: st_intersects FROM countries_bbox_web | WHERE ST_INTERSECTS(shape, TO_CARTESIANSHAPE("POLYGON((3100000 -3400000, 3500000 -3400000, 3500000 -3150000, 3100000 -3150000, 3100000 -3400000))")) @@ -430,7 +430,7 @@ LSO | Lesotho | BBOX(3007181.718244638, 3278977.271857335, -3321117. 
; literalCartesianPolygonIntersectsPolygon -required_feature: esql.st_intersects +required_capability: st_intersects ROW wkt = ["POLYGON((-2000 6000, -600 6000, -600 6600, -2000 6600, -2000 6000))", "POLYGON((2000 6000, 600 6000, 600 6600, 2000 6600, 2000 6000))"] | MV_EXPAND wkt @@ -447,7 +447,7 @@ wkt:keyword | shape:ca # Tests for ST_DISJOINT with CARTESIAN_SHAPE cartesianPolygonDisjointLiteralPolygon -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM countries_bbox_web | WHERE ST_DISJOINT(shape, TO_CARTESIANSHAPE("POLYGON((3100000 -3400000, 3500000 -3400000, 3500000 -3150000, 3100000 -3150000, 3100000 -3400000))")) @@ -460,7 +460,7 @@ ZWE | Zimbabwe | BBOX (2809472.180051312, 3681512.6693309383, -176035 ; cartesianPolygonDisjointEmptyGeometry -required_feature: esql.st_disjoint +required_capability: st_disjoint FROM countries_bbox_web | WHERE ST_DISJOINT(shape, TO_CARTESIANSHAPE("LINESTRING()")) @@ -478,7 +478,7 @@ count:long # Tests for ST_CONTAINS and ST_WITHIN with CARTESIAN_SHAPE cartesianShapeContainsPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM countries_bbox_web | WHERE ST_CONTAINS(shape, TO_CARTESIANSHAPE("POLYGON((3100000 -3400000, 3500000 -3400000, 3500000 -3150000, 3100000 -3150000, 3100000 -3400000))")) @@ -490,7 +490,7 @@ ZAF | South Africa | BBOX(1834915.5679635953, 4218142.412200545, -2527908 ; cartesianShapeWithinPolygon -required_feature: esql.st_contains_within +required_capability: st_contains_within FROM countries_bbox_web | WHERE ST_WITHIN(shape, TO_CARTESIANSHAPE("POLYGON((1800000 -2500000, 4300000 -2500000, 4300000 -6000000, 1800000 -6000000, 1800000 -2500000))")) @@ -507,7 +507,7 @@ LSO | Lesotho | BBOX(3007181.718244638, 3278977.271857335, -3321117. 
# Tests for Equality and casting with CARTESIAN_SHAPE cartesianshapeEquals -required_feature: esql.spatial_shapes +required_capability: spatial_shapes ROW wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] | MV_EXPAND wkt @@ -520,7 +520,7 @@ wkt:keyword |pt:cartesian_shape ; cartesianShapeNotEquals -required_feature: esql.spatial_shapes +required_capability: spatial_shapes ROW wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] | MV_EXPAND wkt @@ -533,7 +533,7 @@ wkt:keyword |pt:cartesian_shape ; convertCartesianShapeFromStringParseError -required_feature: esql.spatial_shapes +required_capability: spatial_shapes row wkt = ["POINTX(4297.11 -1475.53)", "POINT(7580.93 2272.77)", "POINT(111)"] | mv_expand wkt diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 5bdf0bd963fee..6322746318230 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -71,7 +71,7 @@ emp_no:integer | last_name:keyword | gender:keyword | f_l:boolean ; stringCast -required_feature: esql.string_literal_auto_casting +required_capability: string_literal_auto_casting ROW a = 1 | eval ss = substring("abcd", "2"), l = left("abcd", "2"), r = right("abcd", "2"); @@ -80,7 +80,7 @@ a:integer | ss:keyword | l:keyword | r:keyword ; stringCastEmp -required_feature: esql.string_literal_auto_casting +required_capability: string_literal_auto_casting from employees | eval ss = substring(first_name, "2") @@ -330,7 +330,7 @@ emp_no:integer | name:keyword // Note: no matches in MV returned in -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where job_positions in ("Internship", first_name) | keep emp_no, job_positions; ignoreOrder:true @@ -522,7 +522,7 @@ emp_no:integer |positions:keyword ; lessThanMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where job_positions < "C" | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions < \"C\"] failed, treating result as null. Only first 20 failures recorded. @@ -535,7 +535,7 @@ emp_no:integer |job_positions:keyword ; greaterThanMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where job_positions > "C" | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [job_positions > \"C\"] failed, treating result as null. Only first 20 failures recorded. @@ -552,7 +552,7 @@ emp_no:integer |job_positions:keyword ; equalToMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where job_positions == "Accountant" | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded. @@ -564,7 +564,7 @@ emp_no:integer |job_positions:keyword ; equalToOrEqualToMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where job_positions == "Accountant" or job_positions == "Tech Lead" | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions] failed, treating result as null. Only first 20 failures recorded. 
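A note on the many `mv_warn` hunks in this file: the `warning:` lines are assertions that the query emits exactly those warnings, since comparing a multivalued field against a single value evaluates to null rather than failing the query. A minimal sketch of the shape of such a test, with a hypothetical name and expected row:

equalsOnMultivalueSketch
required_capability: mv_warn

from employees | where job_positions == "Accountant" | keep emp_no | sort emp_no | limit 1;
warning:Line 1:24: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded.
warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value

emp_no:integer
10001
;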
@@ -577,7 +577,7 @@ emp_no:integer |job_positions:keyword ; inMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where job_positions in ("Accountant", "Tech Lead") | keep emp_no, job_positions | sort emp_no; warning:Line 1:24: evaluation of [job_positions in (\"Accountant\", \"Tech Lead\")] failed, treating result as null. Only first 20 failures recorded. @@ -590,7 +590,7 @@ emp_no:integer |job_positions:keyword ; notLessThanMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where not(job_positions < "C") | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [not(job_positions < \"C\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions < \"C\"] failed, treating result as null. Only first 20 failures recorded.] @@ -607,7 +607,7 @@ emp_no:integer |job_positions:keyword ; notGreaterThanMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where not(job_positions > "C") | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [not(job_positions > \"C\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions > \"C\"] failed, treating result as null. Only first 20 failures recorded.] @@ -620,7 +620,7 @@ emp_no:integer |job_positions:keyword ; notEqualToMultivalue -required_feature: esql.mv_warn +required_capability: mv_warn from employees | where not(job_positions == "Accountant") | keep emp_no, job_positions | sort emp_no | limit 6; warning:Line 1:24: evaluation of [not(job_positions == \"Accountant\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded.] 
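The `#[Emulated:...]` suffix on the `not(...)` tests pairs each expected warning with an alternate accepted form; judging from the offsets, 1:24 points at the `not(` wrapper while 1:28 points at the inner comparison after the negation has been rewritten, so either location may be reported. A sketch following the same pattern (name and expected row hypothetical):

notEqualsOnMultivalueSketch
required_capability: mv_warn

from employees | where not(job_positions == "Accountant") | keep emp_no | sort emp_no | limit 1;
warning:Line 1:24: evaluation of [not(job_positions == \"Accountant\")] failed, treating result as null. Only first 20 failures recorded.#[Emulated:Line 1:28: evaluation of [job_positions == \"Accountant\"] failed, treating result as null. Only first 20 failures recorded.]

emp_no:integer
10001
;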
@@ -745,7 +745,7 @@ ROW a=[10, 9, 8] ; mvSort -required_feature: esql.mv_sort +required_capability: mv_sort row a = ["Mon", "Tues", "Wed", "Thu", "Fri"] | eval sa = mv_sort(a), sd = mv_sort(a, "DESC"); @@ -754,7 +754,7 @@ a:keyword | sa:keyword | sd:keyword ; mvSortEmp -required_feature: esql.mv_sort +required_capability: mv_sort FROM employees | eval sd = mv_sort(job_positions, "DESC"), sa = mv_sort(job_positions) @@ -772,7 +772,7 @@ emp_no:integer | job_positions:keyword ; mvSliceCast -required_feature: esql.string_literal_auto_casting +required_capability: string_literal_auto_casting ROW a = ["1", "2", "3", "4"] | eval a1 = mv_slice(a, "0", "1"); @@ -782,7 +782,7 @@ a:keyword | a1:keyword ; mvSliceEmp -required_feature: esql.mv_sort +required_capability: mv_sort from employees | eval a1 = mv_slice(salary_change.keyword, 0, 1) @@ -799,7 +799,7 @@ emp_no:integer | salary_change.keyword:keyword | a1:keyword ; mvZip -required_feature: esql.mv_sort +required_capability: mv_sort // tag::mv_zip[] ROW a = ["x", "y", "z"], b = ["1", "2"] @@ -815,7 +815,7 @@ a:keyword | b:keyword | c:keyword ; mvZipEmp -required_feature: esql.mv_sort +required_capability: mv_sort from employees | eval full_name = mv_zip(first_name, last_name, " "), full_name_2 = mv_zip(last_name, first_name), jobs = mv_zip(job_positions, salary_change.keyword, "#") @@ -842,7 +842,7 @@ beta | Kubernetes cluster | [beta k8s server, beta k8s server2 ; lengthOfText -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = length(host_group), l2 = length(description) | keep l1, l2; ignoreOrder:true @@ -856,7 +856,7 @@ null | 19 ; startsWithText -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = starts_with(host_group, host), l2 = starts_with(description, host) | keep l1, l2; ignoreOrder:true @@ -870,7 +870,7 @@ false | null ; substringOfText -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = substring(host_group, 0, 5), l2 = substring(description, 0, 5) | keep l1, l2; ignoreOrder:true @@ -884,7 +884,7 @@ Gatew | null ; concatOfText -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where host == "epsilon" | eval l1 = concat(host, "/", host_group), l2 = concat(host_group, "/", description) | sort l1 | keep l1, l2; warning:Line 1:86: evaluation of [concat(host_group, \"/\", description)] failed, treating result as null. Only first 20 failures recorded. 
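Note that the MV_SLICE and MV_ZIP tests in this file are gated on `required_capability: mv_sort`: the three multivalue functions shipped together under one capability (the `EsqlFeatures` javadoc further down likewise groups the introduction of MV_SORT, MV_SLICE, and MV_ZIP). A combined sketch, with hypothetical name and values:

mvSortThenZipSketch
required_capability: mv_sort

ROW a = ["x", "z", "y"], b = ["1", "2", "3"]
| EVAL c = mv_zip(mv_sort(a), b, "-");

a:keyword | b:keyword | c:keyword
["x", "z", "y"] | ["1", "2", "3"] | ["x-1", "y-2", "z-3"]
;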
@@ -1150,7 +1150,7 @@ a:keyword | upper:keyword | lower:keyword ; values -required_feature: esql.agg_values +required_capability: agg_values FROM employees | WHERE emp_no <= 10009 @@ -1162,7 +1162,7 @@ required_feature: esql.agg_values ; valuesGrouped -required_feature: esql.agg_values +required_capability: agg_values // tag::values-grouped[] FROM employees @@ -1314,7 +1314,7 @@ min(f_l):integer | max(f_l):integer | job_positions:keyword ; locateWarnings#[skip:-8.13.99,reason:new string function added in 8.14] -required_feature: esql.mv_warn +required_capability: mv_warn from hosts | where host=="epsilon" | eval l1 = locate(host_group, "ate"), l2 = locate(description, "ate") | keep l1, l2; ignoreOrder:true @@ -1328,7 +1328,7 @@ null | 0 ; base64Encode#[skip:-8.13.99,reason:new base64 function added in 8.14] -required_feature: esql.base64_decode_encode +required_capability: base64_decode_encode // tag::to_base64[] row a = "elastic" @@ -1343,7 +1343,7 @@ elastic | ZWxhc3RpYw== ; base64Decode#[skip:-8.13.99,reason:new base64 function added in 8.14] -required_feature: esql.base64_decode_encode +required_capability: base64_decode_encode // tag::from_base64[] row a = "ZWxhc3RpYw==" @@ -1358,7 +1358,7 @@ ZWxhc3RpYw== | elastic ; base64EncodeDecodeEmp#[skip:-8.13.99,reason:new base64 function added in 8.14] -required_feature: esql.base64_decode_encode +required_capability: base64_decode_encode from employees | where emp_no < 10032 and emp_no > 10027 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec index fa524d270bb98..38f3d439e7504 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec @@ -46,7 +46,7 @@ from ul_logs | sort bytes_in desc nulls last, id | limit 12; ; filterPushDownGT -required_feature: esql.mv_warn +required_capability: mv_warn from ul_logs | where bytes_in >= to_ul(74330435873664882) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; warning:Line 1:22: evaluation of [bytes_in >= to_ul(74330435873664882)] failed, treating result as null. Only first 20 failures recorded. @@ -68,7 +68,7 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc ; filterPushDownRange -required_feature: esql.mv_warn +required_capability: mv_warn from ul_logs | where bytes_in >= to_ul(74330435873664882) | where bytes_in <= to_ul(316080452389500167) | sort bytes_in | eval div = bytes_in / to_ul(pow(10., 15)) | keep bytes_in, div, id | limit 12; warning:Line 1:22: evaluation of [bytes_in >= to_ul(74330435873664882)] failed, treating result as null. Only first 20 failures recorded. 
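The unsigned_long tests around this point reuse the same `mv_warn` capability: filters such as `bytes_in >= to_ul(...)` are pushed down, and documents where `bytes_in` is multivalued yield the single-value warning instead of an error. A sketch in the same shape (expected row hypothetical):

ulFilterPushDownSketch
required_capability: mv_warn

from ul_logs | where bytes_in == to_ul(154551962150890564) | keep id;
warning:Line 1:22: evaluation of [bytes_in == to_ul(154551962150890564)] failed, treating result as null. Only first 20 failures recorded.
warning:Line 1:22: java.lang.IllegalArgumentException: single-value function encountered multi-value

id:integer
42
;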
@@ -84,7 +84,7 @@ warning:#[Emulated:Line 1:67: java.lang.IllegalArgumentException: single-value f ; filterPushDownIn -required_feature: esql.mv_warn +required_capability: mv_warn // TODO: testing framework doesn't perform implicit conversion to UL of given values, needs explicit conversion from ul_logs | where bytes_in in (to_ul(74330435873664882), to_ul(154551962150890564), to_ul(195161570976258241)) | sort bytes_in | keep bytes_in, id; @@ -98,7 +98,7 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc ; filterOnFieldsEquality -required_feature: esql.mv_warn +required_capability: mv_warn from ul_logs | where bytes_in == bytes_out; warning:Line 1:22: evaluation of [bytes_in == bytes_out] failed, treating result as null. Only first 20 failures recorded. @@ -109,7 +109,7 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc ; filterOnFieldsInequality -required_feature: esql.mv_warn +required_capability: mv_warn from ul_logs | sort id | where bytes_in < bytes_out | eval b_in = bytes_in / to_ul(pow(10.,15)), b_out = bytes_out / to_ul(pow(10.,15)) | limit 5; warning:Line 1:32: evaluation of [bytes_in < bytes_out] failed, treating result as null. Only first 20 failures recorded. @@ -140,7 +140,7 @@ from ul_logs | stats c = count(bytes_in) by bytes_in | sort c desc, bytes_in des ; case -required_feature: esql.mv_warn +required_capability: mv_warn from ul_logs | where case(bytes_in == to_ul(154551962150890564), true, false); warning:Line 1:27: evaluation of [bytes_in == to_ul(154551962150890564)] failed, treating result as null. Only first 20 failures recorded. @@ -151,7 +151,7 @@ warning:Line 1:27: java.lang.IllegalArgumentException: single-value function enc ; toDegrees -required_feature: esql.mv_warn +required_capability: mv_warn FROM ul_logs | WHERE bytes_in == bytes_out | EVAL deg = TO_DEGREES(bytes_in) | KEEP bytes_in, deg ; @@ -163,7 +163,7 @@ warning:Line 1:22: java.lang.IllegalArgumentException: single-value function enc ; toRadians -required_feature: esql.mv_warn +required_capability: mv_warn FROM ul_logs | WHERE bytes_in == bytes_out | EVAL rad = TO_RADIANS(bytes_in) | KEEP bytes_in, rad ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec index 513189cc0fe86..3b6c41f883018 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec @@ -312,7 +312,7 @@ null | null | null | 11 | 0 | 1.3.0 | 0.1 | no ; values -required_feature: esql.agg_values +required_capability: agg_values FROM apps | STATS version=MV_SORT(VALUES(version)) @@ -323,7 +323,7 @@ required_feature: esql.agg_values ; valuesGrouped -required_feature: esql.agg_values +required_capability: agg_values FROM apps | EVAL name=SUBSTRING(name, 0, 1) @@ -348,7 +348,7 @@ version:version | name:keyword ; valuesGroupedByOrdinals -required_feature: esql.agg_values +required_capability: agg_values FROM apps | STATS version=MV_SORT(VALUES(version)) BY name @@ -372,7 +372,7 @@ version:version | name:keyword ; implictCastingEqual -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from apps | where version == "1.2.3.4" | sort name | keep name, version; name:keyword | version:version @@ -381,7 +381,7 @@ hhhhh | 1.2.3.4 ; implictCastingNotEqual -required_feature: esql.string_literal_auto_casting_extended 
+required_capability: string_literal_auto_casting_extended from apps | where version != "1.2.3.4" | sort name, version | keep name, version | limit 2; name:keyword | version:version @@ -390,7 +390,7 @@ bbbbb | 2.1 ; implictCastingGreaterThan -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from apps | where version > "1.2.3.4" | sort name, version | keep name, version | limit 2; name:keyword | version:version @@ -399,7 +399,7 @@ ccccc | 2.3.4 ; implictCastingLessThanOrEqual -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from apps | where version <= "1.2.3.4" | sort name, version | keep name, version | limit 2; name:keyword | version:version @@ -408,7 +408,7 @@ aaaaa | 1.2.3.4 ; implictCastingIn -required_feature: esql.string_literal_auto_casting_extended +required_capability: string_literal_auto_casting_extended from apps | where version in ( "1.2.3.4", "bad" ) | sort name | keep name, version; name:keyword | version:version diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java index b79d7cc0fbdde..3d626e65f6f11 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.type.DataTypes; import java.util.List; @@ -106,12 +105,13 @@ static int estimateSize(DataType dataType) { ElementType elementType = PlannerUtils.toElementType(dataType); return switch (elementType) { case BOOLEAN -> 1; - case BYTES_REF -> { - if (dataType == DataTypes.IP) { - yield 16; - } - yield 50; // wild estimate for the size of a string. - } + case BYTES_REF -> switch (dataType.typeName()) { + case "ip" -> 16; // IP addresses, both IPv4 and IPv6, are encoded using 16 bytes. + case "version" -> 15; // 8.15.2-SNAPSHOT is 15 bytes, most are shorter, some can be longer + case "geo_point", "cartesian_point" -> 21; // WKB for points is typically 21 bytes. + case "geo_shape", "cartesian_shape" -> 200; // wild estimate, based on some test data (airport_city_boundaries) + default -> 50; // wild estimate for the size of a string. 
+ }; case DOC -> throw new EsqlIllegalArgumentException("can't load a [doc] with field extraction"); case DOUBLE -> Double.BYTES; case INT -> Integer.BYTES; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java index 4f852264193b4..cf311d4413671 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlFeatures.java @@ -10,10 +10,23 @@ import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import java.util.Map; import java.util.Set; +/** + * {@link NodeFeature}s declared by ESQL. These should be used for fast checks + * on the node. Before the introduction of the {@link RestNodesCapabilitiesAction} + * this was used for controlling which features are tested so many of the + * examples below are *just* used for that. Don't make more of those - add them + * to {@link EsqlCapabilities} instead. + *

+ * NOTE: You can't remove a feature now and probably never will be able to. + * Only add more of these if you need a fast CPU level check. + *

+ */ public class EsqlFeatures implements FeatureSpecification { /** * Introduction of {@code MV_SORT}, {@code MV_SLICE}, and {@code MV_ZIP}. diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java index 6061b6db89724..87c93a9198215 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMoveToStepAction.java @@ -212,6 +212,7 @@ public static class Request extends AcknowledgedRequest implements ToXC private PartialStepKey nextStepKey; public Request(String index, Step.StepKey currentStepKey, PartialStepKey nextStepKey) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.index = index; this.currentStepKey = currentStepKey; this.nextStepKey = nextStepKey; @@ -224,7 +225,9 @@ public Request(StreamInput in) throws IOException { this.nextStepKey = new PartialStepKey(in); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public String getIndex() { return index; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java index 5818ce6582bef..95358adb832c7 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportRetryAction.java @@ -118,6 +118,7 @@ public static class Request extends AcknowledgedRequest implements Indi private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); public Request(String... indices) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.indices = indices; } @@ -127,7 +128,9 @@ public Request(StreamInput in) throws IOException { this.indicesOptions = IndicesOptions.readIndicesOptions(in); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } @Override public Request indices(String... 
indices) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java index 5d75adedddde0..e11e9d5ad8cc9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/action/openai/OpenAiChatCompletionAction.java @@ -44,16 +44,17 @@ public OpenAiChatCompletionAction(Sender sender, OpenAiChatCompletionModel model @Override public void execute(InferenceInputs inferenceInputs, TimeValue timeout, ActionListener listener) { - if (inferenceInputs instanceof DocumentsOnlyInput docsOnlyInput) { - if (docsOnlyInput.getInputs().size() > 1) { - listener.onFailure(new ElasticsearchStatusException("OpenAI completions only accepts 1 input", RestStatus.BAD_REQUEST)); - return; - } - } else { + if (inferenceInputs instanceof DocumentsOnlyInput == false) { listener.onFailure(new ElasticsearchStatusException("Invalid inference input type", RestStatus.INTERNAL_SERVER_ERROR)); return; } + var docsOnlyInput = (DocumentsOnlyInput) inferenceInputs; + if (docsOnlyInput.getInputs().size() > 1) { + listener.onFailure(new ElasticsearchStatusException("OpenAI completions only accepts 1 input", RestStatus.BAD_REQUEST)); + return; + } + try { ActionListener wrappedListener = wrapFailuresInElasticsearchException(errorMessage, listener); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java index 93141727f705c..c9cc71b7fdcda 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/cohere/CohereRankedResponseEntity.java @@ -148,14 +148,12 @@ private static RankedDocsResults.RankedDoc parseRankedDocObject(XContentParser p } if (index == -1) { - logger.error("Failed to find required field [index] in Cohere embeddings response"); + logger.warn("Failed to find required field [index] in Cohere rerank response"); } if (relevanceScore == -1) { - logger.error("Failed to find required field [relevance_score] in Cohere embeddings response"); - } - if (documentText == null) { - logger.error("Failed to find required field [document] in Cohere embeddings response"); + logger.warn("Failed to find required field [relevance_score] in Cohere rerank response"); } + // documentText may or may not be present depending on the request parameter return new RankedDocsResults.RankedDoc(index, relevanceScore, documentText); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index 6f9e32e32f667..47c7cc0fce015 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -60,16 +60,42 @@ public static T removeAsType(Map sourceMap, String key, Clas if 
(type.isAssignableFrom(o.getClass())) {
             return (T) o;
         } else {
-            throw new ElasticsearchStatusException(
-                "field [{}] is not of the expected type." + " The value [{}] cannot be converted to a [{}]",
-                RestStatus.BAD_REQUEST,
-                key,
-                o,
-                type.getSimpleName()
-            );
+            throw new ElasticsearchStatusException(invalidTypeErrorMsg(key, o, type.getSimpleName()), RestStatus.BAD_REQUEST);
         }
     }

+    /**
+     * Remove the object from the map and cast to the expected type.
+     * If the object cannot be cast to {@code type}, an error is added to the
+     * {@code validationException} parameter.
+     *
+     * @param sourceMap Map containing fields
+     * @param key The key of the object to remove
+     * @param type The expected type of the removed object
+     * @param validationException Collects a validation error if the value is not of type {@code type}
+     * @return {@code null} if not present, else the object cast to type {@code T}
+     * @param <T> The expected type
+     */
+    @SuppressWarnings("unchecked")
+    public static <T> T removeAsType(Map<String, Object> sourceMap, String key, Class<T> type, ValidationException validationException) {
+        Object o = sourceMap.remove(key);
+        if (o == null) {
+            return null;
+        }
+
+        if (type.isAssignableFrom(o.getClass())) {
+            return (T) o;
+        } else {
+            validationException.addValidationError(invalidTypeErrorMsg(key, o, type.getSimpleName()));
+            return null;
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public static Map<String, Object> removeFromMap(Map<String, Object> sourceMap, String fieldName) {
+        return (Map<String, Object>) sourceMap.remove(fieldName);
+    }
+
     @SuppressWarnings("unchecked")
     public static Map<String, Object> removeFromMapOrThrowIfNull(Map<String, Object> sourceMap, String fieldName) {
         Map<String, Object> value = (Map<String, Object>) sourceMap.remove(fieldName);
@@ -116,6 +142,15 @@ public static String missingSettingErrorMsg(String settingName, String scope) {
         return Strings.format("[%s] does not contain the required setting [%s]", scope, settingName);
     }

+    public static String invalidTypeErrorMsg(String settingName, Object foundObject, String expectedType) {
+        return Strings.format(
+            "field [%s] is not of the expected type.
The value [%s] cannot be converted to a [%s]", + settingName, + foundObject, + expectedType + ); + } + public static String invalidUrlErrorMsg(String url, String settingName, String settingScope) { return Strings.format("[%s] Invalid url [%s] received for field [%s]", settingScope, url, settingName); } @@ -230,7 +265,13 @@ public static String extractRequiredString( String scope, ValidationException validationException ) { - String requiredField = ServiceUtils.removeAsType(map, settingName, String.class); + int initialValidationErrorCount = validationException.validationErrors().size(); + String requiredField = ServiceUtils.removeAsType(map, settingName, String.class, validationException); + + if (validationException.validationErrors().size() > initialValidationErrorCount) { + // new validation error occurred + return null; + } if (requiredField == null) { validationException.addValidationError(ServiceUtils.missingSettingErrorMsg(settingName, scope)); @@ -238,7 +279,7 @@ public static String extractRequiredString( validationException.addValidationError(ServiceUtils.mustBeNonEmptyString(settingName, scope)); } - if (validationException.validationErrors().isEmpty() == false) { + if (validationException.validationErrors().size() > initialValidationErrorCount) { return null; } @@ -251,13 +292,19 @@ public static String extractOptionalString( String scope, ValidationException validationException ) { - String optionalField = ServiceUtils.removeAsType(map, settingName, String.class); + int initialValidationErrorCount = validationException.validationErrors().size(); + String optionalField = ServiceUtils.removeAsType(map, settingName, String.class, validationException); + + if (validationException.validationErrors().size() > initialValidationErrorCount) { + // new validation error occurred + return null; + } if (optionalField != null && optionalField.isEmpty()) { validationException.addValidationError(ServiceUtils.mustBeNonEmptyString(settingName, scope)); } - if (validationException.validationErrors().isEmpty() == false) { + if (validationException.validationErrors().size() > initialValidationErrorCount) { return null; } @@ -270,13 +317,18 @@ public static Integer extractOptionalPositiveInteger( String scope, ValidationException validationException ) { - Integer optionalField = ServiceUtils.removeAsType(map, settingName, Integer.class); + int initialValidationErrorCount = validationException.validationErrors().size(); + Integer optionalField = ServiceUtils.removeAsType(map, settingName, Integer.class, validationException); + + if (validationException.validationErrors().size() > initialValidationErrorCount) { + return null; + } if (optionalField != null && optionalField <= 0) { validationException.addValidationError(ServiceUtils.mustBeAPositiveNumberErrorMessage(settingName, scope, optionalField)); } - if (validationException.validationErrors().isEmpty() == false) { + if (validationException.validationErrors().size() > initialValidationErrorCount) { return null; } @@ -309,19 +361,8 @@ public static > E extractOptionalEnum( return null; } - public static Boolean extractOptionalBoolean( - Map map, - String settingName, - String scope, - ValidationException validationException - ) { - Boolean optionalField = ServiceUtils.removeAsType(map, settingName, Boolean.class); - - if (validationException.validationErrors().isEmpty() == false) { - return null; - } - - return optionalField; + public static Boolean extractOptionalBoolean(Map map, String settingName, ValidationException validationException) { + 
return ServiceUtils.removeAsType(map, settingName, Boolean.class, validationException); } public static TimeValue extractOptionalTimeValue( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java index 514d5684fc7c8..33bb0fdb07c58 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/azureopenai/embeddings/AzureOpenAiEmbeddingsServiceSettings.java @@ -92,12 +92,7 @@ private static CommonFields fromMap( SimilarityMeasure similarity = extractSimilarity(map, ModelConfigurations.SERVICE_SETTINGS, validationException); RateLimitSettings rateLimitSettings = RateLimitSettings.of(map, DEFAULT_RATE_LIMIT_SETTINGS, validationException); - Boolean dimensionsSetByUser = extractOptionalBoolean( - map, - DIMENSIONS_SET_BY_USER, - ModelConfigurations.SERVICE_SETTINGS, - validationException - ); + Boolean dimensionsSetByUser = extractOptionalBoolean(map, DIMENSIONS_SET_BY_USER, validationException); switch (context) { case REQUEST -> { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java index d55615e9df48a..b23f6f188d8c5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceSettings.java @@ -44,7 +44,7 @@ public class CohereServiceSettings extends FilteredXContentObject implements Ser public static final String OLD_MODEL_ID_FIELD = "model"; public static final String MODEL_ID = "model_id"; private static final Logger logger = LogManager.getLogger(CohereServiceSettings.class); - // The rate limit defined here is pulled for the blog: https://txt.cohere.com/free-developer-tier-announcement/ for the production tier + // Production key rate limits for all endpoints: https://docs.cohere.com/docs/going-live#production-key-specifications // 10K requests a minute private static final RateLimitSettings DEFAULT_RATE_LIMIT_SETTINGS = new RateLimitSettings(10_000); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankTaskSettings.java index 75588aa2b5036..82f2d0e6f7ada 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankTaskSettings.java @@ -49,7 +49,7 @@ public static CohereRerankTaskSettings fromMap(Map map) { return EMPTY_SETTINGS; } - Boolean returnDocuments = extractOptionalBoolean(map, RETURN_DOCUMENTS, ModelConfigurations.TASK_SETTINGS, validationException); + Boolean returnDocuments = extractOptionalBoolean(map, RETURN_DOCUMENTS, validationException); Integer topNDocumentsOnly = extractOptionalPositiveInteger( map, 
TOP_N_DOCS_ONLY, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java index aa05af9461565..1f9ec163aa546 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandModel.java @@ -9,16 +9,32 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.Model; +import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import java.util.Map; + import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTED; public class CustomElandModel extends ElasticsearchModel { + public static CustomElandModel build( + String inferenceEntityId, + TaskType taskType, + String service, + CustomElandInternalServiceSettings serviceSettings, + @Nullable TaskSettings taskSettings + ) { + return taskSettings == null + ? new CustomElandModel(inferenceEntityId, taskType, service, serviceSettings) + : new CustomElandModel(inferenceEntityId, taskType, service, serviceSettings, taskSettings); + } + public CustomElandModel( String inferenceEntityId, TaskType taskType, @@ -28,6 +44,16 @@ public CustomElandModel( super(inferenceEntityId, taskType, service, serviceSettings); } + private CustomElandModel( + String inferenceEntityId, + TaskType taskType, + String service, + CustomElandInternalServiceSettings serviceSettings, + TaskSettings taskSettings + ) { + super(inferenceEntityId, taskType, service, serviceSettings, taskSettings); + } + @Override public CustomElandInternalServiceSettings getServiceSettings() { return (CustomElandInternalServiceSettings) super.getServiceSettings(); @@ -76,4 +102,11 @@ public void onFailure(Exception e) { }; } + public static TaskSettings taskSettingsFromMap(TaskType taskType, Map taskSettingsMap) { + if (TaskType.RERANK.equals(taskType)) { + return CustomElandRerankTaskSettings.defaultsFromMap(taskSettingsMap); + } + + return null; + } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettings.java new file mode 100644 index 0000000000000..a82ffbba3d688 --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/CustomElandRerankTaskSettings.java @@ -0,0 +1,134 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.inference.services.elasticsearch;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.ValidationException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.core.Nullable;
+import org.elasticsearch.inference.TaskSettings;
+import org.elasticsearch.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalBoolean;
+
+/**
+ * Defines the task settings for the internal rerank service.
+ */
+public class CustomElandRerankTaskSettings implements TaskSettings {
+
+    public static final String NAME = "custom_eland_rerank_task_settings";
+    public static final String RETURN_DOCUMENTS = "return_documents";
+
+    static final CustomElandRerankTaskSettings DEFAULT_SETTINGS = new CustomElandRerankTaskSettings(Boolean.TRUE);
+
+    public static CustomElandRerankTaskSettings defaultsFromMap(Map<String, Object> map) {
+        ValidationException validationException = new ValidationException();
+
+        if (map == null || map.isEmpty()) {
+            return DEFAULT_SETTINGS;
+        }
+
+        Boolean returnDocuments = extractOptionalBoolean(map, RETURN_DOCUMENTS, validationException);
+        if (validationException.validationErrors().isEmpty() == false) {
+            throw validationException;
+        }
+
+        if (returnDocuments == null) {
+            returnDocuments = true;
+        }
+
+        return new CustomElandRerankTaskSettings(returnDocuments);
+    }
+
+    /**
+     * From map without any validation.
+     * @param map source map
+     * @return Task settings
+     */
+    public static CustomElandRerankTaskSettings fromMap(Map<String, Object> map) {
+        if (map == null || map.isEmpty()) {
+            return DEFAULT_SETTINGS;
+        }
+
+        Boolean returnDocuments = extractOptionalBoolean(map, RETURN_DOCUMENTS, new ValidationException());
+        return new CustomElandRerankTaskSettings(returnDocuments);
+    }
+
+    /**
+     * Return either the request or original settings by preferring non-null fields
+     * from the request settings over the original settings.
+     *
+     * @param originalSettings the settings stored as part of the inference entity configuration
+     * @param requestTaskSettings the settings passed in within the task_settings field of the request
+     * @return Either {@code originalSettings} or {@code requestTaskSettings}
+     */
+    public static CustomElandRerankTaskSettings of(
+        CustomElandRerankTaskSettings originalSettings,
+        CustomElandRerankTaskSettings requestTaskSettings
+    ) {
+        return requestTaskSettings.returnDocuments() != null ?
requestTaskSettings : originalSettings; + } + + private final Boolean returnDocuments; + + public CustomElandRerankTaskSettings(StreamInput in) throws IOException { + this(in.readOptionalBoolean()); + } + + public CustomElandRerankTaskSettings(@Nullable Boolean doReturnDocuments) { + this.returnDocuments = doReturnDocuments; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (returnDocuments != null) { + builder.field(RETURN_DOCUMENTS, returnDocuments); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.ML_INFERENCE_COHERE_RERANK; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalBoolean(returnDocuments); + } + + public Boolean returnDocuments() { + return returnDocuments; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CustomElandRerankTaskSettings that = (CustomElandRerankTaskSettings) o; + return Objects.equals(returnDocuments, that.returnDocuments); + } + + @Override + public int hashCode() { + return Objects.hash(returnDocuments); + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index cceeb59284c1b..408e3ec1ccbca 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -30,6 +30,7 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.inference.results.ChunkedTextEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; +import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; import org.elasticsearch.xpack.core.inference.results.TextEmbeddingResults; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; @@ -40,18 +41,22 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextEmbeddingConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextSimilarityConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; import static org.elasticsearch.xpack.core.ClientHelper.INFERENCE_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.inference.results.ResultUtils.createInvalidChunkedResultException; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMap; import 
static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; import static org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings.MODEL_ID; @@ -85,6 +90,7 @@ public void parseRequestConfig( ) { try { Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMap(config, ModelConfigurations.TASK_SETTINGS); String modelId = (String) serviceSettingsMap.get(MODEL_ID); if (modelId == null) { throw new IllegalArgumentException("Error parsing request config, model id is missing"); @@ -93,7 +99,7 @@ public void parseRequestConfig( e5Case(inferenceEntityId, taskType, config, platformArchitectures, serviceSettingsMap, modelListener); } else { throwIfNotEmptyMap(config, name()); - customElandCase(inferenceEntityId, taskType, serviceSettingsMap, modelListener); + customElandCase(inferenceEntityId, taskType, serviceSettingsMap, taskSettingsMap, modelListener); } } catch (Exception e) { modelListener.onFailure(e); @@ -104,6 +110,7 @@ private void customElandCase( String inferenceEntityId, TaskType taskType, Map serviceSettingsMap, + Map taskSettingsMap, ActionListener modelListener ) { String modelId = (String) serviceSettingsMap.get(MODEL_ID); @@ -121,7 +128,18 @@ private void customElandCase( serviceSettingsMap ).build(); throwIfNotEmptyMap(serviceSettingsMap, name()); - delegate.onResponse(new CustomElandModel(inferenceEntityId, taskType, name(), customElandInternalServiceSettings)); + + var taskSettings = CustomElandModel.taskSettingsFromMap(TaskType.RERANK, taskSettingsMap); + throwIfNotEmptyMap(taskSettingsMap, name()); + + var model = CustomElandModel.build( + inferenceEntityId, + TaskType.RERANK, + name(), + customElandInternalServiceSettings, + taskSettings + ); + delegate.onResponse(model); } }); @@ -184,6 +202,7 @@ public ElasticsearchModel parsePersistedConfigWithSecrets( @Override public ElasticsearchModel parsePersistedConfig(String inferenceEntityId, TaskType taskType, Map config) { Map serviceSettingsMap = removeFromMapOrThrowIfNull(config, ModelConfigurations.SERVICE_SETTINGS); + Map taskSettingsMap = removeFromMap(config, ModelConfigurations.TASK_SETTINGS); String modelId = (String) serviceSettingsMap.get(MODEL_ID); if (modelId == null) { @@ -198,14 +217,12 @@ public ElasticsearchModel parsePersistedConfig(String inferenceEntityId, TaskTyp (MultilingualE5SmallInternalServiceSettings) MultilingualE5SmallInternalServiceSettings.fromMap(serviceSettingsMap).build() ); } else { - return new CustomElandModel( - inferenceEntityId, - taskType, - name(), - (CustomElandInternalServiceSettings) CustomElandInternalServiceSettings.fromMap(serviceSettingsMap).build() - ); - } + var serviceSettings = (CustomElandInternalServiceSettings) CustomElandInternalServiceSettings.fromMap(serviceSettingsMap) + .build(); + var taskSettings = CustomElandModel.taskSettingsFromMap(taskType, taskSettingsMap); + return CustomElandModel.build(inferenceEntityId, taskType, name(), serviceSettings, taskSettings); + } } @Override @@ -218,13 +235,23 @@ public void infer( TimeValue timeout, ActionListener listener ) { - try { - checkCompatibleTaskType(model.getConfigurations().getTaskType()); - } catch (Exception e) { - listener.onFailure(e); - return; + var taskType = model.getConfigurations().getTaskType(); + if (TaskType.TEXT_EMBEDDING.equals(taskType)) { + inferTextEmbedding(model, input, inputType, timeout, 
listener); + } else if (TaskType.RERANK.equals(taskType)) { + inferRerank(model, query, input, timeout, taskSettings, listener); + } else { + throw new ElasticsearchStatusException(TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), RestStatus.BAD_REQUEST); } + } + public void inferTextEmbedding( + Model model, + List input, + InputType inputType, + TimeValue timeout, + ActionListener listener + ) { var request = InferTrainedModelDeploymentAction.Request.forTextInput( model.getConfigurations().getInferenceEntityId(), TextEmbeddingConfigUpdate.EMPTY_INSTANCE, @@ -239,6 +266,37 @@ public void infer( ); } + public void inferRerank( + Model model, + String query, + List inputs, + TimeValue timeout, + Map requestTaskSettings, + ActionListener listener + ) { + var config = new TextSimilarityConfigUpdate(query); + var request = InferTrainedModelDeploymentAction.Request.forTextInput( + model.getConfigurations().getInferenceEntityId(), + config, + inputs, + timeout + ); + + var modelSettings = (CustomElandRerankTaskSettings) model.getTaskSettings(); + var requestSettings = CustomElandRerankTaskSettings.fromMap(requestTaskSettings); + Boolean returnDocs = CustomElandRerankTaskSettings.of(modelSettings, requestSettings).returnDocuments(); + + Function inputSupplier = returnDocs == Boolean.TRUE ? inputs::get : i -> null; + + client.execute( + InferTrainedModelDeploymentAction.INSTANCE, + request, + listener.delegateFailureAndWrap( + (l, inferenceResult) -> l.onResponse(textSimilarityResultsToRankedDocs(inferenceResult.getResults(), inputSupplier)) + ) + ); + } + public void chunkedInfer( Model model, List input, @@ -262,10 +320,10 @@ public void chunkedInfer( TimeValue timeout, ActionListener> listener ) { - try { - checkCompatibleTaskType(model.getConfigurations().getTaskType()); - } catch (Exception e) { - listener.onFailure(e); + if (TaskType.TEXT_EMBEDDING.isAnyOrSame(model.getTaskType()) == false) { + listener.onFailure( + new ElasticsearchStatusException(TaskType.unsupportedTaskTypeErrorMsg(model.getTaskType(), NAME), RestStatus.BAD_REQUEST) + ); return; } @@ -315,7 +373,7 @@ public void start(Model model, ActionListener listener) { return; } - if (model.getConfigurations().getTaskType() != TaskType.TEXT_EMBEDDING) { + if (model.getTaskType() != TaskType.TEXT_EMBEDDING && model.getTaskType() != TaskType.RERANK) { listener.onFailure( new IllegalStateException(TaskType.unsupportedTaskTypeErrorMsg(model.getConfigurations().getTaskType(), NAME)) ); @@ -364,7 +422,7 @@ public void putModel(Model model, ActionListener listener) { } }) ); - } else if (model instanceof CustomElandModel elandModel) { + } else if (model instanceof CustomElandModel) { logger.info("Custom eland model detected, model must have been already loaded into the cluster with eland."); listener.onResponse(Boolean.TRUE); } else { @@ -412,12 +470,6 @@ private static IllegalStateException notTextEmbeddingModelException(Model model) ); } - private void checkCompatibleTaskType(TaskType taskType) { - if (TaskType.TEXT_EMBEDDING.isAnyOrSame(taskType) == false) { - throw new ElasticsearchStatusException(TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), RestStatus.BAD_REQUEST); - } - } - @Override public boolean isInClusterService() { return true; @@ -448,4 +500,36 @@ private static String selectDefaultModelVariantBasedOnClusterArchitecture(Set results, + Function inputSupplier + ) { + List rankings = new ArrayList<>(results.size()); + for (int i = 0; i < results.size(); i++) { + var result = results.get(i); + if (result instanceof 
org.elasticsearch.xpack.core.ml.inference.results.TextSimilarityInferenceResults similarity) { + rankings.add(new RankedDocsResults.RankedDoc(i, (float) similarity.score(), inputSupplier.apply(i))); + } else if (result instanceof org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults errorResult) { + if (errorResult.getException() instanceof ElasticsearchStatusException statusException) { + throw statusException; + } else { + throw new ElasticsearchStatusException( + "Received error inference result.", + RestStatus.INTERNAL_SERVER_ERROR, + errorResult.getException() + ); + } + } else { + throw new IllegalArgumentException( + "Received invalid inference result, of type " + + result.getClass().getName() + + " but expected TextSimilarityInferenceResults." + ); + } + } + + Collections.sort(rankings); + return new RankedDocsResults(rankings); + } + } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java index 954469537a4cc..dc6561ba992fe 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchModel.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.inference.Model; import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.TaskSettings; import org.elasticsearch.inference.TaskType; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; @@ -25,6 +26,16 @@ public ElasticsearchModel( super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings)); } + public ElasticsearchModel( + String inferenceEntityId, + TaskType taskType, + String service, + ElasticsearchInternalServiceSettings serviceSettings, + TaskSettings taskSettings + ) { + super(new ModelConfigurations(inferenceEntityId, taskType, service, serviceSettings, taskSettings)); + } + @Override public ElasticsearchInternalServiceSettings getServiceSettings() { return (ElasticsearchInternalServiceSettings) super.getServiceSettings(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereUtilsTests.java new file mode 100644 index 0000000000000..47aff8dad65db --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/request/cohere/CohereUtilsTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.request.cohere; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.is; + +public class CohereUtilsTests extends ESTestCase { + + public void testCreateRequestSourceHeader() { + var requestSourceHeader = CohereUtils.createRequestSourceHeader(); + + assertThat(requestSourceHeader.getName(), is("Request-Source")); + assertThat(requestSourceHeader.getValue(), is("unspecified:elasticsearch")); + } + +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java index 26f6e5b7e694a..bf9fdbe7235b6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java @@ -31,6 +31,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.convertToUri; import static org.elasticsearch.xpack.inference.services.ServiceUtils.createUri; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalEnum; +import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalPositiveInteger; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalTimeValue; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredSecureString; @@ -71,6 +72,21 @@ public void testRemoveAsTypeWithTheCorrectType() { assertThat(map.entrySet(), empty()); } + public void testRemoveAsType_Validation_WithTheCorrectType() { + Map map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE, "d", 1.0)); + + ValidationException validationException = new ValidationException(); + Integer i = ServiceUtils.removeAsType(map, "a", Integer.class, validationException); + assertEquals(Integer.valueOf(5), i); + assertNull(map.get("a")); // field has been removed + assertThat(validationException.validationErrors(), empty()); + + String str = ServiceUtils.removeAsType(map, "b", String.class, validationException); + assertEquals("a string", str); + assertNull(map.get("b")); + assertThat(validationException.validationErrors(), empty()); + } + public void testRemoveAsTypeWithInCorrectType() { Map map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE, "d", 5.0, "e", 5)); @@ -79,6 +95,7 @@ public void testRemoveAsTypeWithInCorrectType() { e.getMessage(), containsString("field [a] is not of the expected type. The value [5] cannot be converted to a [String]") ); + assertNull(map.get("a")); e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.removeAsType(map, "b", Boolean.class)); assertThat( @@ -108,14 +125,71 @@ public void testRemoveAsTypeWithInCorrectType() { e.getMessage(), containsString("field [e] is not of the expected type. 
The value [5] cannot be converted to a [Double]") ); + assertNull(map.get("e")); + + assertThat(map.entrySet(), empty()); + } + + public void testRemoveAsType_Validation_WithInCorrectType() { + Map map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE, "d", 5.0, "e", 5)); + + var validationException = new ValidationException(); + Object result = ServiceUtils.removeAsType(map, "a", String.class, validationException); + assertNull(result); + assertThat(validationException.validationErrors(), hasSize(1)); + assertThat( + validationException.validationErrors().get(0), + containsString("field [a] is not of the expected type. The value [5] cannot be converted to a [String]") + ); + assertNull(map.get("a")); + + validationException = new ValidationException(); + ServiceUtils.removeAsType(map, "b", Boolean.class, validationException); + assertThat(validationException.validationErrors(), hasSize(1)); + assertThat( + validationException.validationErrors().get(0), + containsString("field [b] is not of the expected type. The value [a string] cannot be converted to a [Boolean]") + ); + assertNull(map.get("b")); + + validationException = new ValidationException(); + result = ServiceUtils.removeAsType(map, "c", Integer.class, validationException); + assertNull(result); + assertThat(validationException.validationErrors(), hasSize(1)); + assertThat( + validationException.validationErrors().get(0), + containsString("field [c] is not of the expected type. The value [true] cannot be converted to a [Integer]") + ); + assertNull(map.get("c")); + + // cannot convert double to integer + validationException = new ValidationException(); + result = ServiceUtils.removeAsType(map, "d", Integer.class, validationException); + assertNull(result); + assertThat(validationException.validationErrors(), hasSize(1)); + assertThat( + validationException.validationErrors().get(0), + containsString("field [d] is not of the expected type. The value [5.0] cannot be converted to a [Integer]") + ); assertNull(map.get("d")); + // cannot convert integer to double + validationException = new ValidationException(); + result = ServiceUtils.removeAsType(map, "e", Double.class, validationException); + assertNull(result); + assertThat(validationException.validationErrors(), hasSize(1)); + assertThat( + validationException.validationErrors().get(0), + containsString("field [e] is not of the expected type. 
The value [5] cannot be converted to a [Double]") + ); + assertNull(map.get("e")); + assertThat(map.entrySet(), empty()); } public void testRemoveAsTypeMissingReturnsNull() { Map map = new HashMap<>(Map.of("a", 5, "b", "a string", "c", Boolean.TRUE)); - assertNull(ServiceUtils.removeAsType(new HashMap<>(), "missing", Integer.class)); + assertNull(ServiceUtils.removeAsType(map, "missing", Integer.class)); assertThat(map.entrySet(), hasSize(3)); } @@ -197,10 +271,11 @@ public void testExtractRequiredSecureString_AddsException_WhenFieldIsEmpty() { public void testExtractRequiredString_CreatesString() { var validation = new ValidationException(); + validation.addValidationError("previous error"); Map map = modifiableMap(Map.of("key", "value")); var createdString = extractRequiredString(map, "key", "scope", validation); - assertTrue(validation.validationErrors().isEmpty()); + assertThat(validation.validationErrors(), hasSize(1)); assertNotNull(createdString); assertThat(createdString, is("value")); assertTrue(map.isEmpty()); @@ -208,24 +283,27 @@ public void testExtractRequiredString_CreatesString() { public void testExtractRequiredString_AddsException_WhenFieldDoesNotExist() { var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map map = modifiableMap(Map.of("key", "value")); var createdString = extractRequiredSecureString(map, "abc", "scope", validation); assertNull(createdString); - assertFalse(validation.validationErrors().isEmpty()); + assertThat(validation.validationErrors(), hasSize(2)); assertThat(map.size(), is(1)); - assertThat(validation.validationErrors().get(0), is("[scope] does not contain the required setting [abc]")); + assertThat(validation.validationErrors().get(1), is("[scope] does not contain the required setting [abc]")); } public void testExtractRequiredString_AddsException_WhenFieldIsEmpty() { var validation = new ValidationException(); + validation.addValidationError("previous error"); Map map = modifiableMap(Map.of("key", "")); var createdString = extractOptionalString(map, "key", "scope", validation); assertNull(createdString); assertFalse(validation.validationErrors().isEmpty()); assertTrue(map.isEmpty()); - assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. [key] must be a non-empty string")); + assertThat(validation.validationErrors().get(1), is("[scope] Invalid value empty string. [key] must be a non-empty string")); } public void testExtractOptionalString_CreatesString() { @@ -241,11 +319,12 @@ public void testExtractOptionalString_CreatesString() { public void testExtractOptionalString_DoesNotAddException_WhenFieldDoesNotExist() { var validation = new ValidationException(); + validation.addValidationError("previous error"); Map map = modifiableMap(Map.of("key", "value")); var createdString = extractOptionalString(map, "abc", "scope", validation); assertNull(createdString); - assertTrue(validation.validationErrors().isEmpty()); + assertThat(validation.validationErrors(), hasSize(1)); assertThat(map.size(), is(1)); } @@ -260,6 +339,14 @@ public void testExtractOptionalString_AddsException_WhenFieldIsEmpty() { assertThat(validation.validationErrors().get(0), is("[scope] Invalid value empty string. 
[key] must be a non-empty string")); } + public void testExtractOptionalPositiveInt() { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map map = modifiableMap(Map.of("abc", 1)); + assertEquals(Integer.valueOf(1), extractOptionalPositiveInteger(map, "abc", "scope", validation)); + assertThat(validation.validationErrors(), hasSize(1)); + } + public void testExtractOptionalEnum_ReturnsNull_WhenFieldDoesNotExist() { var validation = new ValidationException(); Map map = modifiableMap(Map.of("key", "value")); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java index 1ac97642f0b85..a306a3e660cd9 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/embeddings/CohereEmbeddingsServiceSettingsTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.services.cohere.embeddings; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -247,7 +246,7 @@ public void testFromMap_InvalidEmbeddingType_ThrowsError_ForPersistent() { public void testFromMap_ReturnsFailure_WhenEmbeddingTypesAreNotValid() { var exception = expectThrows( - ElasticsearchStatusException.class, + ValidationException.class, () -> CohereEmbeddingsServiceSettings.fromMap( new HashMap<>(Map.of(CohereEmbeddingsServiceSettings.EMBEDDING_TYPE, List.of("abc"))), ConfigurationParseContext.PERSISTENT @@ -256,7 +255,7 @@ public void testFromMap_ReturnsFailure_WhenEmbeddingTypesAreNotValid() { MatcherAssert.assertThat( exception.getMessage(), - is("field [embedding_type] is not of the expected type. The value [[abc]] cannot be converted to a [String]") + containsString("field [embedding_type] is not of the expected type. 
The value [[abc]] cannot be converted to a [String]") ); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 7212edbb8cf8c..ea11e9d0343e3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -24,15 +24,22 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.inference.results.ChunkedTextEmbeddingResults; import org.elasticsearch.xpack.core.inference.results.ErrorChunkedInferenceResults; +import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.results.ChunkedTextEmbeddingResultsTests; import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TokenizationConfigUpdate; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.settings.InternalServiceSettings; +import org.junit.After; +import org.junit.Before; +import org.mockito.Mockito; import java.util.ArrayList; import java.util.Arrays; @@ -41,6 +48,7 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -59,6 +67,18 @@ public class ElasticsearchInternalServiceTests extends ESTestCase { TaskType taskType = TaskType.TEXT_EMBEDDING; String randomInferenceEntityId = randomAlphaOfLength(10); + private static ThreadPool threadPool; + + @Before + public void setUpThreadPool() { + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdownThreadPool() { + TestThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } + public void testParseRequestConfig() { // Null model variant @@ -220,6 +240,95 @@ public void testParseRequestConfig() { } } + @SuppressWarnings("unchecked") + public void testParseRequestConfig_Rerank() { + // with task settings + { + var client = mock(Client.class); + doAnswer(invocation -> { + var listener = (ActionListener) invocation.getArguments()[2]; + listener.onResponse( + new GetTrainedModelsAction.Response(new QueryPage<>(List.of(mock(TrainedModelConfig.class)), 1, mock(ParseField.class))) + ); + return null; + }).when(client).execute(Mockito.same(GetTrainedModelsAction.INSTANCE), any(), any()); + + when(client.threadPool()).thenReturn(threadPool); + + var service = createService(client); + var settings = new HashMap(); + settings.put( + ModelConfigurations.SERVICE_SETTINGS, + new HashMap<>( + Map.of( + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, + 1, + 
ElasticsearchInternalServiceSettings.NUM_THREADS,
+                        4,
+                        InternalServiceSettings.MODEL_ID,
+                        "foo"
+                    )
+                )
+            );
+            var returnDocs = randomBoolean();
+            settings.put(
+                ModelConfigurations.TASK_SETTINGS,
+                new HashMap<>(Map.of(CustomElandRerankTaskSettings.RETURN_DOCUMENTS, returnDocs))
+            );
+
+            ActionListener<Model> modelListener = ActionListener.wrap(model -> {
+                assertThat(model, instanceOf(CustomElandModel.class));
+                assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class));
+                assertThat(model.getServiceSettings(), instanceOf(ElasticsearchInternalServiceSettings.class));
+                assertEquals(returnDocs, ((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments());
+            }, e -> { fail("Model parsing failed " + e.getMessage()); });
+
+            service.parseRequestConfig(randomInferenceEntityId, taskType, settings, Set.of(), modelListener);
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testParseRequestConfig_Rerank_DefaultTaskSettings() {
+        // without task settings; the default task settings should be applied
+        {
+            var client = mock(Client.class);
+            doAnswer(invocation -> {
+                var listener = (ActionListener<GetTrainedModelsAction.Response>) invocation.getArguments()[2];
+                listener.onResponse(
+                    new GetTrainedModelsAction.Response(new QueryPage<>(List.of(mock(TrainedModelConfig.class)), 1, mock(ParseField.class)))
+                );
+                return null;
+            }).when(client).execute(Mockito.same(GetTrainedModelsAction.INSTANCE), any(), any());
+
+            when(client.threadPool()).thenReturn(threadPool);
+
+            var service = createService(client);
+            var settings = new HashMap<String, Object>();
+            settings.put(
+                ModelConfigurations.SERVICE_SETTINGS,
+                new HashMap<>(
+                    Map.of(
+                        ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS,
+                        1,
+                        ElasticsearchInternalServiceSettings.NUM_THREADS,
+                        4,
+                        InternalServiceSettings.MODEL_ID,
+                        "foo"
+                    )
+                )
+            );
+
+            ActionListener<Model> modelListener = ActionListener.wrap(model -> {
+                assertThat(model, instanceOf(CustomElandModel.class));
+                assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class));
+                assertThat(model.getServiceSettings(), instanceOf(ElasticsearchInternalServiceSettings.class));
+                assertEquals(Boolean.TRUE, ((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments());
+            }, e -> { fail("Model parsing failed " + e.getMessage()); });
+
+            service.parseRequestConfig(randomInferenceEntityId, taskType, settings, Set.of(), modelListener);
+        }
+    }
+
     private ActionListener<Model> getModelVerificationActionListener(MultilingualE5SmallInternalServiceSettings e5ServiceSettings) {
         return ActionListener.wrap(model -> {
             assertEquals(
@@ -480,6 +589,61 @@ public void testChunkInferSetsTokenization() {
         }
     }
 
+    public void testParsePersistedConfig_Rerank() {
+        // with task settings
+        {
+            var service = createService(mock(Client.class));
+            var settings = new HashMap<String, Object>();
+            settings.put(
+                ModelConfigurations.SERVICE_SETTINGS,
+                new HashMap<>(
+                    Map.of(
+                        ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS,
+                        1,
+                        ElasticsearchInternalServiceSettings.NUM_THREADS,
+                        4,
+                        InternalServiceSettings.MODEL_ID,
+                        "foo"
+                    )
+                )
+            );
+            settings.put(InternalServiceSettings.MODEL_ID, "foo");
+            var returnDocs = randomBoolean();
+            settings.put(
+                ModelConfigurations.TASK_SETTINGS,
+                new HashMap<>(Map.of(CustomElandRerankTaskSettings.RETURN_DOCUMENTS, returnDocs))
+            );
+
+            var model = service.parsePersistedConfig(randomInferenceEntityId, TaskType.RERANK, settings);
+            assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class));
+            assertEquals(returnDocs, ((CustomElandRerankTaskSettings)
model.getTaskSettings()).returnDocuments()); + } + + // without task settings + { + var service = createService(mock(Client.class)); + var settings = new HashMap(); + settings.put( + ModelConfigurations.SERVICE_SETTINGS, + new HashMap<>( + Map.of( + ElasticsearchInternalServiceSettings.NUM_ALLOCATIONS, + 1, + ElasticsearchInternalServiceSettings.NUM_THREADS, + 4, + InternalServiceSettings.MODEL_ID, + "foo" + ) + ) + ); + settings.put(InternalServiceSettings.MODEL_ID, "foo"); + + var model = service.parsePersistedConfig(randomInferenceEntityId, TaskType.RERANK, settings); + assertThat(model.getTaskSettings(), instanceOf(CustomElandRerankTaskSettings.class)); + assertTrue(((CustomElandRerankTaskSettings) model.getTaskSettings()).returnDocuments()); + } + } + private ElasticsearchInternalService createService(Client client) { var context = new InferenceServiceExtension.InferenceServiceFactoryContext(client); return new ElasticsearchInternalService(context); diff --git a/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java b/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java index a397d9864d23d..2f6127c44957f 100644 --- a/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java +++ b/x-pack/plugin/inference/src/yamlRestTest/java/org/elasticsearch/xpack/inference/InferenceRestIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; @@ -22,6 +23,7 @@ public class InferenceRestIT extends ESClientYamlSuiteTestCase { .setting("xpack.security.enabled", "false") .setting("xpack.security.http.ssl.enabled", "false") .plugin("inference-service-test") + .feature(FeatureFlag.SEMANTIC_TEXT_ENABLED) .distribution(DistributionType.DEFAULT) .build(); diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 03f1aaf8577cf..127ea31fa7798 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -705,6 +705,11 @@ protected AggregateMetricSyntheticFieldLoader(String name, String simpleName, En this.metrics = metrics; } + @Override + public String fieldName() { + return name; + } + @Override public Stream> storedFieldLoaders() { return Stream.of(); diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index ebf060f520c5a..0dc37ab9e7251 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ 
b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -372,6 +372,11 @@ public void write(XContentBuilder b) throws IOException { b.field(simpleName(), fieldType().value); } } + + @Override + public String fieldName() { + return name(); + } }; } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java index 57aba2bb80d68..f09d867087664 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; @@ -71,7 +70,7 @@ public void addMlState() { client(), ClusterState.EMPTY_STATE, TestIndexNameExpressionResolver.newInstance(), - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + TimeValue.THIRTY_SECONDS, future ); future.actionGet(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotSearchIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotSearchIT.java index 2e16436736e89..2f8165e6a20be 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotSearchIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotSearchIT.java @@ -15,8 +15,8 @@ import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -60,7 +60,7 @@ public void addMlState() { client(), ClusterState.EMPTY_STATE, TestIndexNameExpressionResolver.newInstance(), - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + TimeValue.THIRTY_SECONDS, future ); future.actionGet(); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java index 6cb467af525c9..bc8e4794d7daa 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.support.IndicesOptions; import 
org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.OperationRouting; @@ -200,7 +199,7 @@ protected void updateModelSnapshotOnJob(ModelSnapshot modelSnapshot) { client(), ClusterState.EMPTY_STATE, TestIndexNameExpressionResolver.newInstance(), - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + TimeValue.THIRTY_SECONDS, future ); future.get(); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index ae128b507c795..675933808c603 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetadata; @@ -1101,7 +1100,7 @@ private void indexQuantiles(Quantiles quantiles) { client(), ClusterState.EMPTY_STATE, TestIndexNameExpressionResolver.newInstance(), - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + TimeValue.THIRTY_SECONDS, future ); future.actionGet(); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java index 4c8382047e796..ee96d154ab55e 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/UnusedStatsRemoverIT.java @@ -9,9 +9,9 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.ToXContent; @@ -57,7 +57,7 @@ public void createComponents() { client(), clusterService().state(), TestIndexNameExpressionResolver.newInstance(client().threadPool().getThreadContext()), - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + TimeValue.THIRTY_SECONDS, future ); future.actionGet(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index c849e69c780bd..a2d8fd1d60316 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -146,7 +146,7 @@ 
public void clusterChanged(ClusterChangedEvent event) { AnnotationIndex.createAnnotationsIndexIfNecessary( client, event.state(), - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, ActionListener.wrap(r -> isIndexCreationInProgress.set(false), e -> { if (e.getMessage().equals(previousException)) { logger.debug("Error creating ML annotations index or aliases", e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java index 9fc97ff234c58..4ee294bcf0d8c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java @@ -256,14 +256,14 @@ private void createStatsIndexIfNecessary() { client, clusterState, indexNameExpressionResolver, - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, ActionListener.wrap( r -> ElasticsearchMappings.addDocMappingIfMissing( MlStatsIndex.writeAlias(), MlStatsIndex::wrappedMapping, client, clusterState, - MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT, + MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, listener, MlStatsIndex.STATS_INDEX_MAPPINGS_VERSION ), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java index 525d3adba7457..c7074f8e7285e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/TextSimilarityProcessor.java @@ -87,7 +87,7 @@ record ResultProcessor(String question, String resultsField, TextSimilarityConfi @Override public InferenceResults processResult(TokenizationResult tokenization, PyTorchInferenceResult pyTorchResult, boolean chunkResult) { if (chunkResult) { - throw chunkingNotSupportedException(TaskType.NER); + throw chunkingNotSupportedException(TaskType.TEXT_SIMILARITY); } if (pyTorchResult.getInferenceResult().length < 1) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 7a314b82024be..8d83156b0e0ee 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -100,7 +100,6 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; -import static org.elasticsearch.action.support.master.MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_HIDDEN; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -268,7 +267,7 @@ public void testOpenJob() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); when(jobTask.getAllocationId()).thenReturn(1L); - manager.openJob(jobTask, 
clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); assertEquals(1, manager.numberOfOpenJobs()); assertTrue(manager.jobHasActiveAutodetectProcess(jobTask)); ArgumentCaptor captor = ArgumentCaptor.forClass(JobTaskState.class); @@ -296,7 +295,7 @@ public void testOpenJob_withoutVersion() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn(job.getId()); AtomicReference errorHolder = new AtomicReference<>(); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> errorHolder.set(e)); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> errorHolder.set(e)); Exception error = errorHolder.get(); assertThat(error, is(notNullValue())); assertThat(error.getMessage(), equalTo("Cannot open job [no_version] because jobs created prior to version 5.5 are not supported")); @@ -339,22 +338,22 @@ public void testOpenJob_exceedMaxNumJobs() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("bar"); when(jobTask.getAllocationId()).thenReturn(1L); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("baz"); when(jobTask.getAllocationId()).thenReturn(2L); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); assertEquals(3, manager.numberOfOpenJobs()); Exception[] holder = new Exception[1]; jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foobar"); when(jobTask.getAllocationId()).thenReturn(3L); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> holder[0] = e); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> holder[0] = e); Exception e = holder[0]; assertEquals("max running job capacity [3] reached", e.getMessage()); @@ -363,7 +362,7 @@ public void testOpenJob_exceedMaxNumJobs() { when(jobTask.getJobId()).thenReturn("baz"); manager.closeJob(jobTask, null); assertEquals(2, manager.numberOfOpenJobs()); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e1, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e1, b) -> {}); assertEquals(3, manager.numberOfOpenJobs()); } @@ -374,7 +373,7 @@ public void testProcessData() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); DataLoadParams params = new DataLoadParams(TimeRange.builder().build(), Optional.empty()); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -401,7 +400,7 @@ public void testProcessDataThrowsElasticsearchStatusException_onIoException() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); Exception[] holder = new Exception[1]; 
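        // the (dataCounts1, e) completion handler below captures the failure so the test can assert on it synchronously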
manager.processData(jobTask, analysisRegistry, inputStream, xContentType, params, (dataCounts1, e) -> holder[0] = e); assertNotNull(holder[0]); @@ -413,7 +412,7 @@ public void testCloseJob() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -443,7 +442,7 @@ public void testVacate() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); when(jobTask.triggerVacate()).thenReturn(true); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -475,7 +474,7 @@ public void testCanCloseClosingJob() throws Exception { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -528,7 +527,7 @@ public void testCanKillClosingJob() throws Exception { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -562,7 +561,7 @@ public void testBucketResetMessageIsSent() { InputStream inputStream = createInputStream(""); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, inputStream, xContentType, params, (dataCounts1, e) -> {}); verify(autodetectCommunicator).writeToJob(same(inputStream), same(analysisRegistry), same(xContentType), same(params), any()); } @@ -573,7 +572,7 @@ public void testFlush() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); InputStream inputStream = createInputStream(""); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -617,7 +616,7 @@ public void testCloseThrows() { // create a jobtask JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -660,7 +659,7 @@ public void testJobHasActiveAutodetectProcess() { when(jobTask.getJobId()).thenReturn("foo"); assertFalse(manager.jobHasActiveAutodetectProcess(jobTask)); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -683,7 +682,7 @@ public void testKillKillsAutodetectProcess() throws IOException { when(jobTask.getJobId()).thenReturn("foo"); 
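        // the task has no running autodetect process yet; openJob below registers one before the kill is exercised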
assertFalse(manager.jobHasActiveAutodetectProcess(jobTask)); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, @@ -728,7 +727,7 @@ public void testProcessData_GivenStateNotOpened() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); InputStream inputStream = createInputStream(""); DataCounts[] dataCounts = new DataCounts[1]; manager.processData( @@ -836,7 +835,7 @@ public void testGetOpenProcessMemoryUsage() { AutodetectProcessManager manager = createSpyManager(); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); long expectedSizeBytes = Job.PROCESS_MEMORY_OVERHEAD.getBytes() + switch (assignmentMemoryBasis) { case MODEL_MEMORY_LIMIT -> modelMemoryLimitBytes; @@ -905,7 +904,7 @@ private AutodetectProcessManager createSpyManagerAndCallProcessData(String jobId AutodetectProcessManager manager = createSpyManager(); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn(jobId); - manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + manager.openJob(jobTask, clusterState, TimeValue.THIRTY_SECONDS, (e, b) -> {}); manager.processData( jobTask, analysisRegistry, diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetStatusAction.java index 0d8f3aad27daa..05ab989f444fe 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetStatusAction.java @@ -133,7 +133,9 @@ public Request(StreamInput in) throws IOException { waitForResourcesCreated = in.readBoolean(); } - public Request() {} + public Request() { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + } public boolean waitForResourcesCreated() { return waitForResourcesCreated; diff --git a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java index 45d3653a28b6a..af4595c5bbd76 100644 --- a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java +++ b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java @@ -43,8 +43,8 @@ public Object parse(String line) { if (line.startsWith(SCHEMA_PREFIX)) { assertThat("Early schema already declared " + earlySchema, earlySchema.length(), is(0)); earlySchema.append(line.substring(SCHEMA_PREFIX.length()).trim()); - } else if (line.toLowerCase(Locale.ROOT).startsWith("required_feature:")) { - requiredCapabilities.add(line.substring("required_feature:".length()).trim().replace("esql.", "")); + } else if (line.toLowerCase(Locale.ROOT).startsWith("required_capability:")) { + requiredCapabilities.add(line.substring("required_capability:".length()).trim()); } else { if (line.endsWith(";")) { // pick up the query 
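The CsvSpecReader change above switches ESQL spec files from feature-style headers, where the reader stripped a leading `esql.`, to capability names taken verbatim. A minimal before/after sketch of a spec-file header line; the capability name here is illustrative, not taken from a real spec file:

```
// before: feature syntax, reader strips the "esql." prefix
required_feature: esql.agg_values

// after: capability syntax, the name is kept as written
required_capability: agg_values
```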
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java index 665548c432ca0..7ede898fa0425 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/Rollup.java @@ -42,6 +42,8 @@ import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; +import org.elasticsearch.xpack.rollup.action.RollupInfoTransportAction; +import org.elasticsearch.xpack.rollup.action.RollupUsageTransportAction; import org.elasticsearch.xpack.rollup.action.TransportDeleteRollupJobAction; import org.elasticsearch.xpack.rollup.action.TransportGetRollupCapsAction; import org.elasticsearch.xpack.rollup.action.TransportGetRollupIndexCapsAction; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupInfoTransportAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupInfoTransportAction.java similarity index 95% rename from x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupInfoTransportAction.java rename to x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupInfoTransportAction.java index 9bdb514ea5b30..0bbd27c7281de 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupInfoTransportAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupInfoTransportAction.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.rollup; +package org.elasticsearch.xpack.rollup.action; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.common.inject.Inject; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupUsageTransportAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupUsageTransportAction.java similarity index 98% rename from x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupUsageTransportAction.java rename to x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupUsageTransportAction.java index c3b568fc32b71..c711553c99a17 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupUsageTransportAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupUsageTransportAction.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.rollup; +package org.elasticsearch.xpack.rollup.action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupInfoTransportActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/RollupInfoTransportActionTests.java similarity index 98% rename from x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupInfoTransportActionTests.java rename to x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/RollupInfoTransportActionTests.java index 243b478db6dbf..d2304b2c7d9a3 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupInfoTransportActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/RollupInfoTransportActionTests.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.rollup; +package org.elasticsearch.xpack.rollup.action; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java index 96284b2826e48..e37823f8d3c4c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamSecurityIT.java @@ -90,7 +90,12 @@ public ClusterState execute(ClusterState currentState) throws Exception { ? 
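// half the time use a name that matches no real backing index; the bogus UUID below breaks the data stream either way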
original.getIndices().get(0).getName() + "-broken" : original.getIndices().get(0).getName(); DataStream broken = original.copy() - .setIndices(List.of(new Index(brokenIndexName, "broken"), original.getIndices().get(1))) + .setBackingIndices( + original.getBackingIndices() + .copy() + .setIndices(List.of(new Index(brokenIndexName, "broken"), original.getIndices().get(1))) + .build() + ) .build(); brokenDataStreamHolder.set(broken); return ClusterState.builder(currentState) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index 7c753692628cb..286a9cb736b1b 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -7,11 +7,13 @@ package org.elasticsearch.integration; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; -import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata; @@ -25,10 +27,15 @@ import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.authc.RealmConfig; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.security.action.rolemapping.ReservedRoleMappingAction; import org.junit.After; @@ -39,25 +46,31 @@ import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; +import java.util.function.Consumer; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static 
org.elasticsearch.xcontent.XContentType.JSON; import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; /** - * Tests that file settings service can properly add role mappings and detect REST clashes - * with the reserved role mappings. + * Tests that file settings service can properly add role mappings. */ public class RoleMappingFileSettingsIT extends NativeRealmIntegTestCase { @@ -135,12 +148,21 @@ public class RoleMappingFileSettingsIT extends NativeRealmIntegTestCase { } }"""; + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + // some tests make use of cluster-state based role mappings + .put("xpack.security.authc.cluster_state_role_mappings.enabled", true); + return builder.build(); + } + @After public void cleanUp() { updateClusterSettings(Settings.builder().putNull("indices.recovery.max_bytes_per_sec")); } - private void writeJSONFile(String node, String json) throws Exception { + public static void writeJSONFile(String node, String json, Logger logger, AtomicLong versionCounter) throws Exception { long version = versionCounter.incrementAndGet(); FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); @@ -151,10 +173,11 @@ private void writeJSONFile(String node, String json) throws Exception { Files.createDirectories(fileSettingsService.watchedFileDir()); Path tempFilePath = createTempFile(); - logger.info("--> writing JSON config to node {} with path {}", node, tempFilePath); + logger.info("--> before writing JSON config to node {} with path {}", node, tempFilePath); logger.info(Strings.format(json, version)); Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); + logger.info("--> after writing JSON config to node {} with path {}", node, tempFilePath); } private Tuple setupClusterStateListener(String node, String expectedKey) { @@ -238,49 +261,41 @@ private void assertRoleMappingsSaveOK(CountDownLatch savedClusterState, AtomicLo expectThrows(ExecutionException.class, () -> clusterAdmin().updateSettings(req).get()).getMessage() ); + for (UserRoleMapper userRoleMapper : internalCluster().getInstances(UserRoleMapper.class)) { + PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); + userRoleMapper.resolveRoles( + new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), + resolveRolesFuture + ); + assertThat(resolveRolesFuture.get(), containsInAnyOrder("kibana_user", "fleet_user")); + } + + // the role mappings are not retrievable by the role mapping action (which only accesses "native" i.e. 
index-based role mappings) var request = new GetRoleMappingsRequest(); request.setNames("everyone_kibana", "everyone_fleet"); var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertTrue(response.hasMappings()); - assertThat( - Arrays.stream(response.mappings()).map(r -> r.getName()).collect(Collectors.toSet()), - allOf(notNullValue(), containsInAnyOrder("everyone_kibana", "everyone_fleet")) - ); + assertFalse(response.hasMappings()); + assertThat(response.mappings(), emptyArray()); - // Try using the REST API to update the everyone_kibana role mapping - // This should fail, we have reserved certain role mappings in operator mode - assertEquals( - "Failed to process request " - + "[org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest/unset] " - + "with errors: [[everyone_kibana] set as read-only by [file_settings]]", - expectThrows( - IllegalArgumentException.class, - () -> client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_kibana")).actionGet() - ).getMessage() - ); - assertEquals( - "Failed to process request " - + "[org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest/unset] " - + "with errors: [[everyone_fleet] set as read-only by [file_settings]]", - expectThrows( - IllegalArgumentException.class, - () -> client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_fleet")).actionGet() - ).getMessage() - ); + // role mappings (with the same names) can also be stored in the "native" store + var putRoleMappingResponse = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_kibana")).actionGet(); + assertTrue(putRoleMappingResponse.isCreated()); + putRoleMappingResponse = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_fleet")).actionGet(); + assertTrue(putRoleMappingResponse.isCreated()); } public void testRoleMappingsApplied() throws Exception { ensureGreen(); var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); - writeJSONFile(internalCluster().getMasterName(), testJSON); + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); assertRoleMappingsSaveOK(savedClusterState.v1(), savedClusterState.v2()); logger.info("---> cleanup cluster settings..."); savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON); + writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -292,32 +307,65 @@ public void testRoleMappingsApplied() throws Exception { clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()) ); - var request = new GetRoleMappingsRequest(); - request.setNames("everyone_kibana", "everyone_fleet"); - var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertFalse(response.hasMappings()); + // native role mappings are not affected by the removal of the cluster-state based ones + { + var request = new GetRoleMappingsRequest(); + request.setNames("everyone_kibana", "everyone_fleet"); + var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + 
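// both mappings were re-created in the native store earlier, so they survive the cluster-state cleanup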
containsInAnyOrder("everyone_kibana", "everyone_fleet") + ); + } + + // and roles are resolved based on the native role mappings + for (UserRoleMapper userRoleMapper : internalCluster().getInstances(UserRoleMapper.class)) { + PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); + userRoleMapper.resolveRoles( + new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), + resolveRolesFuture + ); + assertThat(resolveRolesFuture.get(), contains("kibana_user_native")); + } + + { + var request = new DeleteRoleMappingRequest(); + request.setName("everyone_kibana"); + var response = client().execute(DeleteRoleMappingAction.INSTANCE, request).get(); + assertTrue(response.isFound()); + request = new DeleteRoleMappingRequest(); + request.setName("everyone_fleet"); + response = client().execute(DeleteRoleMappingAction.INSTANCE, request).get(); + assertTrue(response.isFound()); + } + + // no roles are resolved now, because both native and cluster-state based stores have been cleared + for (UserRoleMapper userRoleMapper : internalCluster().getInstances(UserRoleMapper.class)) { + PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); + userRoleMapper.resolveRoles( + new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), + resolveRolesFuture + ); + assertThat(resolveRolesFuture.get(), empty()); + } } - private Tuple setupClusterStateListenerForError(String node) { - ClusterService clusterService = internalCluster().clusterService(node); + public static Tuple setupClusterStateListenerForError( + ClusterService clusterService, + Consumer errorMetadataConsumer + ) { CountDownLatch savedClusterState = new CountDownLatch(1); AtomicLong metadataVersion = new AtomicLong(-1); clusterService.addListener(new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - if (reservedState != null - && reservedState.errorMetadata() != null - && reservedState.errorMetadata().errorKind() == ReservedStateErrorMetadata.ErrorKind.PARSING) { + if (reservedState != null && reservedState.errorMetadata() != null) { clusterService.removeListener(this); metadataVersion.set(event.state().metadata().version()); savedClusterState.countDown(); - assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, reservedState.errorMetadata().errorKind()); - assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1))); - assertThat( - reservedState.errorMetadata().errors().get(0), - containsString("failed to parse role-mapping [everyone_kibana_bad]. 
missing field [rules]") - ); + errorMetadataConsumer.accept(reservedState.errorMetadata()); } } }); @@ -325,22 +373,13 @@ public void clusterChanged(ClusterChangedEvent event) { return new Tuple<>(savedClusterState, metadataVersion); } - private void assertRoleMappingsNotSaved(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { - boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); - assertTrue(awaitSuccessful); - - // This should succeed, nothing was reserved - client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_kibana_bad")).get(); - client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_fleet_ok")).get(); - } - public void testErrorSaved() throws Exception { ensureGreen(); // save an empty file to clear any prior state, this ensures we don't get a stale file left over by another test var savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); - writeJSONFile(internalCluster().getMasterName(), emptyJSON); + writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -353,76 +392,94 @@ public void testErrorSaved() throws Exception { ); // save a bad file - savedClusterState = setupClusterStateListenerForError(internalCluster().getMasterName()); - - writeJSONFile(internalCluster().getMasterName(), testErrorJSON); - assertRoleMappingsNotSaved(savedClusterState.v1(), savedClusterState.v2()); - } - - private Tuple setupClusterStateListenerForSecurityWriteError(String node) { - ClusterService clusterService = internalCluster().clusterService(node); - CountDownLatch savedClusterState = new CountDownLatch(1); - AtomicLong metadataVersion = new AtomicLong(-1); - clusterService.addListener(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - if (reservedState != null - && reservedState.errorMetadata() != null - && reservedState.errorMetadata().errorKind() == ReservedStateErrorMetadata.ErrorKind.VALIDATION) { - clusterService.removeListener(this); - metadataVersion.set(event.state().metadata().version()); - savedClusterState.countDown(); - assertEquals(ReservedStateErrorMetadata.ErrorKind.VALIDATION, reservedState.errorMetadata().errorKind()); - assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1))); - assertThat(reservedState.errorMetadata().errors().get(0), containsString("closed")); - } + savedClusterState = setupClusterStateListenerForError( + internalCluster().getCurrentMasterNodeInstance(ClusterService.class), + errorMetadata -> { + assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, errorMetadata.errorKind()); + assertThat(errorMetadata.errors(), allOf(notNullValue(), hasSize(1))); + assertThat( + errorMetadata.errors().get(0), + containsString("failed to parse role-mapping [everyone_kibana_bad]. 
missing field [rules]") + ); } - }); - - return new Tuple<>(savedClusterState, metadataVersion); - } - - public void testRoleMappingFailsToWriteToStore() throws Exception { - ensureGreen(); - - var savedClusterState = setupClusterStateListenerForSecurityWriteError(internalCluster().getMasterName()); - - final CloseIndexResponse closeIndexResponse = indicesAdmin().close(new CloseIndexRequest(INTERNAL_SECURITY_MAIN_INDEX_7)).get(); - assertTrue(closeIndexResponse.isAcknowledged()); + ); - writeJSONFile(internalCluster().getMasterName(), testJSON); - boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + writeJSONFile(internalCluster().getMasterName(), testErrorJSON, logger, versionCounter); + awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - var request = new GetRoleMappingsRequest(); - request.setNames("everyone_kibana", "everyone_fleet"); - - var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertFalse(response.hasMappings()); - - final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(savedClusterState.v2().get()) - ).get(); + // no roles are resolved because both role mapping stores are empty + for (UserRoleMapper userRoleMapper : internalCluster().getInstances(UserRoleMapper.class)) { + PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); + userRoleMapper.resolveRoles( + new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), + resolveRolesFuture + ); + assertThat(resolveRolesFuture.get(), empty()); + } + } - assertNull( - clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()) - ); + public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { + ensureGreen(); - ReservedStateMetadata reservedState = clusterStateResponse.getState() - .metadata() - .reservedStateMetadata() - .get(FileSettingsService.NAMESPACE); + // expect the role mappings to apply even if the .security index is closed + var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); - ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedRoleMappingAction.NAME); - assertTrue(handlerMetadata == null || handlerMetadata.keys().isEmpty()); + try { + var closeIndexResponse = indicesAdmin().close(new CloseIndexRequest(INTERNAL_SECURITY_MAIN_INDEX_7)).get(); + assertTrue(closeIndexResponse.isAcknowledged()); + + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + // no native role mappings exist + var request = new GetRoleMappingsRequest(); + request.setNames("everyone_kibana", "everyone_fleet"); + var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertFalse(response.hasMappings()); + + // cluster state settings are also applied + var clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().waitForMetadataVersion(savedClusterState.v2().get())) + .get(); + assertThat( + clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("50mb") + ); + + ReservedStateMetadata reservedState = clusterStateResponse.getState() + .metadata() + .reservedStateMetadata() + .get(FileSettingsService.NAMESPACE); 
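+            // note: even while the .security index is closed, the reserved cluster-state metadata still
+            // records which keys the role_mappings handler owns, as the assertions below verify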
+ + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedRoleMappingAction.NAME); + assertThat(handlerMetadata.keys(), containsInAnyOrder("everyone_kibana", "everyone_fleet")); + + // and roles are resolved based on the cluster-state role mappings + for (UserRoleMapper userRoleMapper : internalCluster().getInstances(UserRoleMapper.class)) { + PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); + userRoleMapper.resolveRoles( + new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), + resolveRolesFuture + ); + assertThat(resolveRolesFuture.get(), containsInAnyOrder("kibana_user", "fleet_user")); + } + } finally { + savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); + writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + var openIndexResponse = indicesAdmin().open(new OpenIndexRequest(INTERNAL_SECURITY_MAIN_INDEX_7)).get(); + assertTrue(openIndexResponse.isAcknowledged()); + } } private PutRoleMappingRequest sampleRestRequest(String name) throws Exception { var json = """ { - "enabled": false, - "roles": [ "kibana_user" ], + "enabled": true, + "roles": [ "kibana_user_native" ], "rules": { "field": { "username": "*" } }, "metadata": { "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7" @@ -433,8 +490,7 @@ private PutRoleMappingRequest sampleRestRequest(String name) throws Exception { var bis = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) ) { - ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, parser); - return PutRoleMappingRequest.fromMapping(mapping); + return new PutRoleMappingRequestBuilder(null).source(name, parser).request(); } } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java deleted file mode 100644 index 48e97b7afb897..0000000000000 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.analysis.common.CommonAnalysisPlugin; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata; -import org.elasticsearch.cluster.metadata.ReservedStateMetadata; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.Strings; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.reindex.ReindexPlugin; -import org.elasticsearch.reservedstate.service.FileSettingsService; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.transport.netty4.Netty4Plugin; -import org.elasticsearch.xpack.wildcard.Wildcard; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.Arrays; -import java.util.Collection; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import static org.hamcrest.Matchers.allOf; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.notNullValue; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -public class FileSettingsRoleMappingsStartupIT extends SecurityIntegTestCase { - - private static AtomicLong versionCounter = new AtomicLong(1); - private static String testJSONForFailedCase = """ - { - "metadata": { - "version": "%s", - "compatibility": "8.4.0" - }, - "state": { - "role_mappings": { - "everyone_kibana_2": { - "enabled": true, - "roles": [ "kibana_user" ], - "rules": { "field": { "username": "*" } }, - "metadata": { - "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", - "_foo": "something" - } - } - } - } - }"""; - - @Override - protected void doAssertXPackIsInstalled() {} - - @Override - protected Path nodeConfigPath(int nodeOrdinal) { - return null; - } - - private void writeJSONFile(String node, String json) throws Exception { - long version = versionCounter.incrementAndGet(); - - FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - - Files.deleteIfExists(fileSettingsService.watchedFile()); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - Path tempFilePath = createTempFile(); - - logger.info("--> writing JSON config to node {} with path {}", node, tempFilePath); - logger.info(Strings.format(json, version)); - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - Files.move(tempFilePath, fileSettingsService.watchedFile(), StandardCopyOption.ATOMIC_MOVE); - } - - private Tuple setupClusterStateListenerForError(String node) { - ClusterService clusterService = internalCluster().clusterService(node); - CountDownLatch savedClusterState = new CountDownLatch(1); - AtomicLong metadataVersion = new AtomicLong(-1); - clusterService.addListener(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - ReservedStateMetadata reservedState = 
event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - if (reservedState != null && reservedState.errorMetadata() != null) { - assertEquals(ReservedStateErrorMetadata.ErrorKind.VALIDATION, reservedState.errorMetadata().errorKind()); - assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1))); - assertThat(reservedState.errorMetadata().errors().get(0), containsString("Fake exception")); - clusterService.removeListener(this); - metadataVersion.set(event.state().metadata().version()); - savedClusterState.countDown(); - } else if (reservedState != null) { - logger.debug(() -> "Got reserved state update without error metadata: " + reservedState); - } else { - logger.debug(() -> "Got cluster state update: " + event.source()); - } - } - }); - - return new Tuple<>(savedClusterState, metadataVersion); - } - - @TestLogging( - value = "org.elasticsearch.common.file:DEBUG,org.elasticsearch.xpack.security:DEBUG,org.elasticsearch.cluster.metadata:DEBUG", - reason = "https://github.com/elastic/elasticsearch/issues/98391" - ) - public void testFailsOnStartMasterNodeWithError() throws Exception { - internalCluster().setBootstrapMasterNodeIndex(0); - - internalCluster().startMasterOnlyNode(); - - logger.info("--> write some role mappings, no other file settings"); - writeJSONFile(internalCluster().getMasterName(), testJSONForFailedCase); - var savedClusterState = setupClusterStateListenerForError(internalCluster().getMasterName()); - - boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); - assertTrue(awaitSuccessful); - } - - public Collection> nodePlugins() { - return Arrays.asList( - UnstableLocalStateSecurity.class, - Netty4Plugin.class, - ReindexPlugin.class, - CommonAnalysisPlugin.class, - InternalSettingsPlugin.class, - MapperExtrasPlugin.class, - Wildcard.class - ); - } - -} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 58d6657b99e32..076ac01f1c8f3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -2673,7 +2673,9 @@ public void testUpdateApiKeysAutoUpdatesLegacySuperuserRoleDescriptor() throws E // raw document has the legacy superuser role descriptor expectRoleDescriptorsForApiKey("limited_by_role_descriptors", legacySuperuserRoleDescriptor, getApiKeyDocument(apiKeyId)); - final Set currentSuperuserRoleDescriptors = Set.of(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR); + final Set currentSuperuserRoleDescriptors = ApiKeyService.removeUserRoleDescriptorDescriptions( + Set.of(ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR) + ); // The first request is not a noop because we are auto-updating the legacy role descriptors to 8.x role descriptors assertSingleUpdate( apiKeyId, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index ef08f855a46cc..0ff4f1160af56 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -1103,8 +1103,7 @@ Collection createComponents( new 
SecurityUsageServices(realms, allRolesStore, nativeRoleMappingStore, ipFilter.get(), profileService, apiKeyService) ); - reservedRoleMappingAction.set(new ReservedRoleMappingAction(nativeRoleMappingStore)); - systemIndices.getMainIndexManager().onStateRecovered(state -> reservedRoleMappingAction.get().securityIndexRecovered()); + reservedRoleMappingAction.set(new ReservedRoleMappingAction()); cacheInvalidatorRegistry.validate(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java index 852887767578f..73d1a1abcdb50 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java @@ -7,24 +7,18 @@ package org.elasticsearch.xpack.security.action.rolemapping; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.GroupedActionListener; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.reservedstate.NonStateTransformResult; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; -import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; import java.io.IOException; import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -38,123 +32,59 @@ * It is used by the ReservedClusterStateService to add/update or remove role mappings. Typical usage * for this action is in the context of file based settings. 
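 * <p>
 * For illustration only (a sketch assembled from the role-mapping JSON used in the tests of this change,
 * not the complete file-based settings schema), the {@code role_mappings} section of such a settings file
 * looks like:
 * <pre>
 * "role_mappings": {
 *   "everyone_kibana": {
 *     "enabled": true,
 *     "roles": [ "kibana_user" ],
 *     "rules": { "field": { "username": "*" } }
 *   }
 * }
 * </pre>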
*/ -public class ReservedRoleMappingAction implements ReservedClusterStateHandler> { +public class ReservedRoleMappingAction implements ReservedClusterStateHandler> { public static final String NAME = "role_mappings"; - private final NativeRoleMappingStore roleMappingStore; - private final ListenableFuture securityIndexRecoveryListener = new ListenableFuture<>(); - - /** - * Creates a ReservedRoleMappingAction - * - * @param roleMappingStore requires {@link NativeRoleMappingStore} for storing/deleting the mappings - */ - public ReservedRoleMappingAction(NativeRoleMappingStore roleMappingStore) { - this.roleMappingStore = roleMappingStore; - } - @Override public String name() { return NAME; } - private static Collection prepare(List roleMappings) { - List requests = roleMappings.stream().map(rm -> PutRoleMappingRequest.fromMapping(rm)).toList(); - - var exceptions = new ArrayList(); - for (var request : requests) { - // File based defined role mappings are allowed to use MetadataUtils.RESERVED_PREFIX - var exception = request.validate(false); - if (exception != null) { - exceptions.add(exception); - } - } - - if (exceptions.isEmpty() == false) { - var illegalArgumentException = new IllegalArgumentException("error on validating put role mapping requests"); - exceptions.forEach(illegalArgumentException::addSuppressed); - throw illegalArgumentException; - } - - return requests; - } - @Override public TransformState transform(Object source, TransformState prevState) throws Exception { - // We execute the prepare() call to catch any errors in the transform phase. - // Since we store the role mappings outside the cluster state, we do the actual save with a - // non cluster state transform call. @SuppressWarnings("unchecked") - var requests = prepare((List) source); - return new TransformState( - prevState.state(), - prevState.keys(), - l -> securityIndexRecoveryListener.addListener( - ActionListener.wrap(ignored -> nonStateTransform(requests, prevState, l), l::onFailure) - ) - ); - } - - // Exposed for testing purposes - protected void nonStateTransform( - Collection requests, - TransformState prevState, - ActionListener listener - ) { - Set entities = requests.stream().map(r -> r.getName()).collect(Collectors.toSet()); - Set toDelete = new HashSet<>(prevState.keys()); - toDelete.removeAll(entities); - - final int tasksCount = requests.size() + toDelete.size(); - - // Nothing to do, don't start a group listener with 0 actions - if (tasksCount == 0) { - listener.onResponse(new NonStateTransformResult(ReservedRoleMappingAction.NAME, Set.of())); - return; - } - - GroupedActionListener taskListener = new GroupedActionListener<>(tasksCount, new ActionListener<>() { - @Override - public void onResponse(Collection booleans) { - listener.onResponse(new NonStateTransformResult(ReservedRoleMappingAction.NAME, Collections.unmodifiableSet(entities))); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - - for (var request : requests) { - roleMappingStore.putRoleMapping(request, taskListener); - } - - for (var mappingToDelete : toDelete) { - var deleteRequest = new DeleteRoleMappingRequest(); - deleteRequest.setName(mappingToDelete); - roleMappingStore.deleteRoleMapping(deleteRequest, taskListener); + Set roleMappings = validate((List) source); + RoleMappingMetadata newRoleMappingMetadata = new RoleMappingMetadata(roleMappings); + if (newRoleMappingMetadata.equals(RoleMappingMetadata.getFromClusterState(prevState.state()))) { + return prevState; + } else { + 
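+            // the mappings differ from what is already in the cluster state: publish the new
+            // RoleMappingMetadata and report the mapping names as the keys owned by this handler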
ClusterState newState = newRoleMappingMetadata.updateClusterState(prevState.state()); + Set entities = newRoleMappingMetadata.getRoleMappings() + .stream() + .map(ExpressionRoleMapping::getName) + .collect(Collectors.toSet()); + return new TransformState(newState, entities); } } @Override - public List fromXContent(XContentParser parser) throws IOException { - List result = new ArrayList<>(); - + public List fromXContent(XContentParser parser) throws IOException { + List result = new ArrayList<>(); Map source = parser.map(); - for (String name : source.keySet()) { @SuppressWarnings("unchecked") Map content = (Map) source.get(name); try (XContentParser mappingParser = mapToXContentParser(XContentParserConfiguration.EMPTY, content)) { - ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, mappingParser); - result.add(mapping); + result.add(new PutRoleMappingRequestBuilder(null).source(name, mappingParser).request()); } } - return result; } - public void securityIndexRecovered() { - securityIndexRecoveryListener.onResponse(null); + private Set validate(List roleMappings) { + var exceptions = new ArrayList(); + for (var roleMapping : roleMappings) { + // File based defined role mappings are allowed to use MetadataUtils.RESERVED_PREFIX + var exception = roleMapping.validate(false); + if (exception != null) { + exceptions.add(exception); + } + } + if (exceptions.isEmpty() == false) { + var illegalArgumentException = new IllegalArgumentException("error on validating put role mapping requests"); + exceptions.forEach(illegalArgumentException::addSuppressed); + throw illegalArgumentException; + } + return roleMappings.stream().map(PutRoleMappingRequest::getMapping).collect(Collectors.toUnmodifiableSet()); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java index 811d357b89f89..b4e8d5d6db83f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java @@ -8,9 +8,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ReservedStateAwareHandledTransportAction; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; @@ -18,12 +18,7 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; -import java.util.Optional; -import java.util.Set; - -public class TransportDeleteRoleMappingAction extends ReservedStateAwareHandledTransportAction< - DeleteRoleMappingRequest, - DeleteRoleMappingResponse> { +public class TransportDeleteRoleMappingAction extends HandledTransportAction { private final NativeRoleMappingStore roleMappingStore; @@ -31,25 +26,20 @@ public class TransportDeleteRoleMappingAction extends 
ReservedStateAwareHandledT public TransportDeleteRoleMappingAction( ActionFilters actionFilters, TransportService transportService, - ClusterService clusterService, NativeRoleMappingStore roleMappingStore ) { - super(DeleteRoleMappingAction.NAME, clusterService, transportService, actionFilters, DeleteRoleMappingRequest::new); + super( + DeleteRoleMappingAction.NAME, + transportService, + actionFilters, + DeleteRoleMappingRequest::new, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); this.roleMappingStore = roleMappingStore; } @Override - protected void doExecuteProtected(Task task, DeleteRoleMappingRequest request, ActionListener listener) { + protected void doExecute(Task task, DeleteRoleMappingRequest request, ActionListener listener) { roleMappingStore.deleteRoleMapping(request, listener.safeMap(DeleteRoleMappingResponse::new)); } - - @Override - public Optional reservedStateHandlerName() { - return Optional.of(ReservedRoleMappingAction.NAME); - } - - @Override - public Set modifiedKeys(DeleteRoleMappingRequest request) { - return Set.of(request.getName()); - } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java index 5e32e4f903f81..44c72bc13a54b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java @@ -8,9 +8,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.ReservedStateAwareHandledTransportAction; -import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; @@ -18,10 +18,7 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; -import java.util.Optional; -import java.util.Set; - -public class TransportPutRoleMappingAction extends ReservedStateAwareHandledTransportAction { +public class TransportPutRoleMappingAction extends HandledTransportAction { private final NativeRoleMappingStore roleMappingStore; @@ -29,32 +26,17 @@ public class TransportPutRoleMappingAction extends ReservedStateAwareHandledTran public TransportPutRoleMappingAction( ActionFilters actionFilters, TransportService transportService, - ClusterService clusterService, NativeRoleMappingStore roleMappingStore ) { - super(PutRoleMappingAction.NAME, clusterService, transportService, actionFilters, PutRoleMappingRequest::new); + super(PutRoleMappingAction.NAME, transportService, actionFilters, PutRoleMappingRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.roleMappingStore = roleMappingStore; } @Override - protected void doExecuteProtected( - Task task, - final PutRoleMappingRequest request, - final ActionListener listener - ) { + protected void doExecute(Task task, final PutRoleMappingRequest request, final ActionListener listener) { 
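+        // with the reserved-state handling gone, this action only writes to the native (index-based) store;
+        // cluster-state (file-based) role mappings are managed separately by ReservedRoleMappingAction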
roleMappingStore.putRoleMapping( request, ActionListener.wrap(created -> listener.onResponse(new PutRoleMappingResponse(created)), listener::onFailure) ); } - - @Override - public Optional reservedStateHandlerName() { - return Optional.of(ReservedRoleMappingAction.NAME); - } - - @Override - public Set modifiedKeys(PutRoleMappingRequest request) { - return Set.of(request.getName()); - } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 55a89e184f84f..883d7cb8ab103 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -371,7 +371,13 @@ && hasRemoteIndices(request.getRoleDescriptors())) { } } - private Set removeUserRoleDescriptorDescriptions(Set userRoleDescriptors) { + /** + * This method removes description from the given user's (limited-by) role descriptors. + * The description field is not supported for API key role descriptors hence storing limited-by roles with descriptions + * would be inconsistent and require handling backwards compatibility. + * Hence why we have to remove them before create/update of API key roles. + */ + static Set removeUserRoleDescriptorDescriptions(Set userRoleDescriptors) { return userRoleDescriptors.stream().map(roleDescriptor -> { if (roleDescriptor.hasDescription()) { return new RoleDescriptor( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java index e7e24037543fa..55562c8ee0138 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/rolemapping/RestPutRoleMappingAction.java @@ -8,6 +8,8 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestRequest; @@ -17,6 +19,7 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; @@ -57,12 +60,18 @@ public String getName() { @Override public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - final String name = request.param("name"); - PutRoleMappingRequestBuilder requestBuilder = new PutRoleMappingRequestBuilder(client).source( - name, - request.requiredContent(), - request.getXContentType() - ).setRefreshPolicy(request.param("refresh")); + String name = request.param("name"); + String refresh = request.param("refresh"); + PutRoleMappingRequestBuilder requestBuilder; + try ( + XContentParser parser = 
XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + request.requiredContent(), + request.getXContentType() + ) + ) { + requestBuilder = new PutRoleMappingRequestBuilder(client).source(name, parser).setRefreshPolicy(refresh); + } return channel -> requestBuilder.execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(PutRoleMappingResponse response, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalReservedUnstableSecurityStateHandlerProvider.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalReservedUnstableSecurityStateHandlerProvider.java deleted file mode 100644 index b4a07093e49c3..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalReservedUnstableSecurityStateHandlerProvider.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider; - -/** - * Mock Security Provider implementation for the {@link ReservedClusterStateHandlerProvider} service interface. This is used - * for {@link org.elasticsearch.test.ESIntegTestCase} because the Security Plugin is really LocalStateSecurity in those tests. - *
- * Unlike {@link LocalReservedSecurityStateHandlerProvider} this implementation is mocked to implement the - * {@link UnstableLocalStateSecurity}. Separate implementation is needed, because the SPI creation code matches the constructor - * signature when instantiating. E.g. we need to match {@link UnstableLocalStateSecurity} instead of {@link LocalStateSecurity} - */ -public class LocalReservedUnstableSecurityStateHandlerProvider extends LocalReservedSecurityStateHandlerProvider { - public LocalReservedUnstableSecurityStateHandlerProvider() { - throw new IllegalStateException("Provider must be constructed using PluginsService"); - } - - public LocalReservedUnstableSecurityStateHandlerProvider(UnstableLocalStateSecurity plugin) { - super(plugin); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/UnstableLocalStateSecurity.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/UnstableLocalStateSecurity.java deleted file mode 100644 index 5621bdced15b3..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/UnstableLocalStateSecurity.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.security; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.reservedstate.NonStateTransformResult; -import org.elasticsearch.reservedstate.ReservedClusterStateHandler; -import org.elasticsearch.reservedstate.TransformState; -import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; -import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.security.action.rolemapping.ReservedRoleMappingAction; - -import java.nio.file.Path; -import java.util.Collection; -import java.util.List; -import java.util.Optional; - -/** - * A test class that allows us to Inject new type of Reserved Handler that can - * simulate errors in saving role mappings. - *
- * We can't use our regular path to simply make an extension of LocalStateSecurity - * in an integration test class, because the reserved handlers are injected through - * SPI. (see {@link LocalReservedUnstableSecurityStateHandlerProvider}) - */ -public final class UnstableLocalStateSecurity extends LocalStateSecurity { - - public UnstableLocalStateSecurity(Settings settings, Path configPath) throws Exception { - super(settings, configPath); - // We reuse most of the initialization of LocalStateSecurity, we then just overwrite - // the security plugin with an extra method to give us a fake RoleMappingAction. - Optional security = plugins.stream().filter(p -> p instanceof Security).findFirst(); - if (security.isPresent()) { - plugins.remove(security.get()); - } - - UnstableLocalStateSecurity thisVar = this; - var action = new ReservedUnstableRoleMappingAction(); - - plugins.add(new Security(settings, super.securityExtensions()) { - @Override - protected SSLService getSslService() { - return thisVar.getSslService(); - } - - @Override - protected XPackLicenseState getLicenseState() { - return thisVar.getLicenseState(); - } - - @Override - List> reservedClusterStateHandlers() { - // pretend the security index is initialized after 2 seconds - var timer = new java.util.Timer(); - timer.schedule(new java.util.TimerTask() { - @Override - public void run() { - action.securityIndexRecovered(); - timer.cancel(); - } - }, 2_000); - return List.of(action); - } - }); - } - - public static class ReservedUnstableRoleMappingAction extends ReservedRoleMappingAction { - /** - * Creates a fake ReservedRoleMappingAction that doesn't actually use the role mapping store - */ - public ReservedUnstableRoleMappingAction() { - // we don't actually need a NativeRoleMappingStore - super(null); - } - - /** - * The nonStateTransform method is the only one that uses the native store, we simply pretend - * something has called the onFailure method of the listener. 
- */ - @Override - protected void nonStateTransform( - Collection requests, - TransformState prevState, - ActionListener listener - ) { - listener.onFailure(new IllegalStateException("Fake exception")); - } - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java index 6cdca0cb3b24d..cac7c91f73ed1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/reservedstate/ReservedRoleMappingActionTests.java @@ -7,77 +7,40 @@ package org.elasticsearch.xpack.security.action.reservedstate; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.reservedstate.NonStateTransformResult; import org.elasticsearch.reservedstate.TransformState; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.security.action.rolemapping.ReservedRoleMappingAction; -import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.Collections; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; +import static org.hamcrest.Matchers.nullValue; /** * Tests that the ReservedRoleMappingAction does validation, can add and remove role mappings */ public class ReservedRoleMappingActionTests extends ESTestCase { + private TransformState processJSON(ReservedRoleMappingAction action, TransformState prevState, String json) throws Exception { try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) { var content = action.fromXContent(parser); var state = action.transform(content, prevState); - - CountDownLatch latch = new CountDownLatch(1); - AtomicReference> updatedKeys = new AtomicReference<>(); - AtomicReference error = new AtomicReference<>(); - state.nonStateTransform().accept(new ActionListener<>() { - @Override - public void onResponse(NonStateTransformResult nonStateTransformResult) { - updatedKeys.set(nonStateTransformResult.updatedKeys()); - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - error.set(e); - latch.countDown(); - } - }); - - latch.await(); - if (error.get() != null) { - throw error.get(); - } - return new TransformState(state.state(), updatedKeys.get()); + assertThat(state.nonStateTransform(), nullValue()); + return state; } } public void testValidation() { - var nativeRoleMappingStore = mockNativeRoleMappingStore(); - 
ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).build(); TransformState prevState = new TransformState(state, Collections.emptySet()); - ReservedRoleMappingAction action = new ReservedRoleMappingAction(nativeRoleMappingStore); - action.securityIndexRecovered(); - + ReservedRoleMappingAction action = new ReservedRoleMappingAction(); String badPolicyJSON = """ { "everyone_kibana": { @@ -97,7 +60,6 @@ public void testValidation() { } } }"""; - assertEquals( "failed to parse role-mapping [everyone_fleet]. missing field [rules]", expectThrows(ParsingException.class, () -> processJSON(action, prevState, badPolicyJSON)).getMessage() @@ -105,13 +67,9 @@ public void testValidation() { } public void testAddRemoveRoleMapping() throws Exception { - var nativeRoleMappingStore = mockNativeRoleMappingStore(); - ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).build(); TransformState prevState = new TransformState(state, Collections.emptySet()); - ReservedRoleMappingAction action = new ReservedRoleMappingAction(nativeRoleMappingStore); - action.securityIndexRecovered(); - + ReservedRoleMappingAction action = new ReservedRoleMappingAction(); String emptyJSON = ""; TransformState updatedState = processJSON(action, prevState, emptyJSON); @@ -147,102 +105,4 @@ public void testAddRemoveRoleMapping() throws Exception { updatedState = processJSON(action, prevState, emptyJSON); assertThat(updatedState.keys(), empty()); } - - @SuppressWarnings("unchecked") - public void testNonStateTransformWaitsOnAsyncActions() throws Exception { - var nativeRoleMappingStore = mockNativeRoleMappingStore(); - - doAnswer(invocation -> { - new Thread(() -> { - // Simulate put role mapping async action taking a while - try { - Thread.sleep(1_000); - ((ActionListener) invocation.getArgument(1)).onFailure(new IllegalStateException("err_done")); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - }).start(); - - return null; - }).when(nativeRoleMappingStore).putRoleMapping(any(), any()); - - doAnswer(invocation -> { - new Thread(() -> { - // Simulate delete role mapping async action taking a while - try { - Thread.sleep(1_000); - ((ActionListener) invocation.getArgument(1)).onFailure(new IllegalStateException("err_done")); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - }).start(); - - return null; - }).when(nativeRoleMappingStore).deleteRoleMapping(any(), any()); - - ClusterState state = ClusterState.builder(new ClusterName("elasticsearch")).build(); - TransformState updatedState = new TransformState(state, Collections.emptySet()); - ReservedRoleMappingAction action = new ReservedRoleMappingAction(nativeRoleMappingStore); - action.securityIndexRecovered(); - - String json = """ - { - "everyone_kibana": { - "enabled": true, - "roles": [ "kibana_user" ], - "rules": { "field": { "username": "*" } }, - "metadata": { - "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", - "_reserved": true - } - }, - "everyone_fleet": { - "enabled": true, - "roles": [ "fleet_user" ], - "rules": { "field": { "username": "*" } }, - "metadata": { - "uuid" : "a9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", - "_reserved": true - } - } - }"""; - - assertEquals( - "err_done", - expectThrows(IllegalStateException.class, () -> processJSON(action, new TransformState(state, Collections.emptySet()), json)) - .getMessage() - ); - - // Now that we've tested that we wait on putRoleMapping correctly, let it finish without exception, so we can test error on delete - 
doAnswer(invocation -> { - ((ActionListener) invocation.getArgument(1)).onResponse(true); - return null; - }).when(nativeRoleMappingStore).putRoleMapping(any(), any()); - - updatedState = processJSON(action, updatedState, json); - assertThat(updatedState.keys(), containsInAnyOrder("everyone_kibana", "everyone_fleet")); - - final TransformState currentState = new TransformState(updatedState.state(), updatedState.keys()); - - assertEquals("err_done", expectThrows(IllegalStateException.class, () -> processJSON(action, currentState, "")).getMessage()); - } - - @SuppressWarnings("unchecked") - private NativeRoleMappingStore mockNativeRoleMappingStore() { - final NativeRoleMappingStore nativeRoleMappingStore = spy( - new NativeRoleMappingStore(Settings.EMPTY, mock(Client.class), mock(SecurityIndexManager.class), mock(ScriptService.class)) - ); - - doAnswer(invocation -> { - ((ActionListener) invocation.getArgument(1)).onResponse(true); - return null; - }).when(nativeRoleMappingStore).putRoleMapping(any(), any()); - - doAnswer(invocation -> { - ((ActionListener) invocation.getArgument(1)).onResponse(true); - return null; - }).when(nativeRoleMappingStore).deleteRoleMapping(any(), any()); - - return nativeRoleMappingStore; - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingActionTests.java deleted file mode 100644 index 038e673e07862..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingActionTests.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.security.action.rolemapping; - -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; -import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; - -import java.util.Collections; - -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.mockito.Mockito.mock; - -public class TransportDeleteRoleMappingActionTests extends ESTestCase { - public void testReservedStateHandler() { - var store = mock(NativeRoleMappingStore.class); - TransportService transportService = new TransportService( - Settings.EMPTY, - mock(Transport.class), - mock(ThreadPool.class), - TransportService.NOOP_TRANSPORT_INTERCEPTOR, - x -> null, - null, - Collections.emptySet() - ); - var action = new TransportDeleteRoleMappingAction(mock(ActionFilters.class), transportService, mock(ClusterService.class), store); - - assertEquals(ReservedRoleMappingAction.NAME, action.reservedStateHandlerName().get()); - - var deleteRequest = new DeleteRoleMappingRequest(); - deleteRequest.setName("kibana_all"); - assertThat(action.modifiedKeys(deleteRequest), containsInAnyOrder("kibana_all")); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java index 58a8e8e3d4751..6f789a10a3a6c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java @@ -9,16 +9,12 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; @@ -33,7 +29,6 @@ import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; @@ -60,7 +55,7 @@ public void setupMocks() { null, Collections.emptySet() ); - action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, 
mock(ClusterService.class), store); + action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, store); requestRef = new AtomicReference<>(null); @@ -99,39 +94,7 @@ private PutRoleMappingResponse put(String name, FieldExpression expression, Stri request.setMetadata(metadata); request.setEnabled(true); final PlainActionFuture future = new PlainActionFuture<>(); - action.doExecuteProtected(mock(Task.class), request, future); + action.doExecute(mock(Task.class), request, future); return future.get(); } - - public void testReservedStateHandler() throws Exception { - assertEquals(ReservedRoleMappingAction.NAME, action.reservedStateHandlerName().get()); - String json = """ - { - "everyone_kibana": { - "enabled": true, - "roles": [ "kibana_user" ], - "rules": { "field": { "username": "*" } }, - "metadata": { - "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7" - } - }, - "everyone_fleet": { - "enabled": true, - "roles": [ "fleet_user" ], - "rules": { "field": { "username": "*" } }, - "metadata": { - "uuid" : "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7" - } - } - }"""; - - try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) { - ReservedRoleMappingAction roleMappingAction = new ReservedRoleMappingAction(store); - var parsedResult = roleMappingAction.fromXContent(parser); - - for (var mapping : parsedResult) { - assertThat(action.modifiedKeys(PutRoleMappingRequest.fromMapping(mapping)), containsInAnyOrder(mapping.getName())); - } - } - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 7752b85c6345c..0871e2568d225 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -1158,7 +1158,9 @@ private static Tuple, Map> newApiKeyDocument getFastStoredHashAlgoForTests().hash(new SecureString(key.toCharArray())), "test", authentication, - type == ApiKey.Type.CROSS_CLUSTER ? Set.of() : Collections.singleton(SUPERUSER_ROLE_DESCRIPTOR), + type == ApiKey.Type.CROSS_CLUSTER + ? 
Set.of() + : ApiKeyService.removeUserRoleDescriptorDescriptions(Set.of(SUPERUSER_ROLE_DESCRIPTOR)), Instant.now(), Instant.now().plus(expiry), keyRoles, @@ -1316,22 +1318,6 @@ public void testParseRoleDescriptorsMap() throws Exception { assertThat(roleDescriptors, hasSize(1)); assertThat(roleDescriptors.get(0), equalTo(roleARoleDescriptor)); - Map superUserRdMap; - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - superUserRdMap = XContentHelper.convertToMap( - XContentType.JSON.xContent(), - BytesReference.bytes(SUPERUSER_ROLE_DESCRIPTOR.toXContent(builder, ToXContent.EMPTY_PARAMS, true)).streamInput(), - false - ); - } - roleDescriptors = service.parseRoleDescriptors( - apiKeyId, - Map.of(SUPERUSER_ROLE_DESCRIPTOR.getName(), superUserRdMap), - randomApiKeyRoleType() - ); - assertThat(roleDescriptors, hasSize(1)); - assertThat(roleDescriptors.get(0), equalTo(SUPERUSER_ROLE_DESCRIPTOR)); - final Map legacySuperUserRdMap; try (XContentBuilder builder = JsonXContent.contentBuilder()) { legacySuperUserRdMap = XContentHelper.convertToMap( @@ -1812,7 +1798,10 @@ public void testApiKeyDocCache() throws IOException, ExecutionException, Interru RoleReference.ApiKeyRoleType.LIMITED_BY ); assertEquals(1, limitedByRoleDescriptors.size()); - assertEquals(SUPERUSER_ROLE_DESCRIPTOR, limitedByRoleDescriptors.get(0)); + RoleDescriptor superuserWithoutDescription = ApiKeyService.removeUserRoleDescriptorDescriptions(Set.of(SUPERUSER_ROLE_DESCRIPTOR)) + .iterator() + .next(); + assertEquals(superuserWithoutDescription, limitedByRoleDescriptors.get(0)); if (metadata == null) { assertNull(cachedApiKeyDoc.metadataFlattened); } else { diff --git a/x-pack/plugin/security/src/test/resources/META-INF/services/org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider b/x-pack/plugin/security/src/test/resources/META-INF/services/org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider index 77c38d302d9c9..3d17572429bac 100644 --- a/x-pack/plugin/security/src/test/resources/META-INF/services/org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider +++ b/x-pack/plugin/security/src/test/resources/META-INF/services/org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider @@ -6,4 +6,3 @@ # org.elasticsearch.xpack.security.LocalReservedSecurityStateHandlerProvider -org.elasticsearch.xpack.security.LocalReservedUnstableSecurityStateHandlerProvider diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/DeleteShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/DeleteShutdownNodeAction.java index 4446e0aeae4db..14417c693f280 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/DeleteShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/DeleteShutdownNodeAction.java @@ -33,10 +33,12 @@ public static class Request extends AcknowledgedRequest { private final String nodeId; public Request(String nodeId) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.nodeId = nodeId; } public Request(StreamInput in) throws IOException { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); if (in.getTransportVersion().isPatchFrom(TransportVersions.V_8_13_4) || in.getTransportVersion().isPatchFrom(TransportVersions.SHUTDOWN_REQUEST_TIMEOUTS_FIX_8_14) || in.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_REQUEST_TIMEOUTS_FIX)) { diff --git 
a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java index b82e6a08fb269..7266f8ff71129 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java @@ -43,6 +43,7 @@ public static class Request extends MasterNodeRequest { private final String[] nodeIds; public Request(String... nodeIds) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); this.nodeIds = nodeIds; } diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/PutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/PutShutdownNodeAction.java index 8356285c10d0d..d857ee4b322d3 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/PutShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/PutShutdownNodeAction.java @@ -90,6 +90,7 @@ public Request( @Nullable String targetNodeName, @Nullable TimeValue gracePeriod ) { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); this.nodeId = nodeId; this.type = type; this.reason = reason; @@ -100,6 +101,7 @@ public Request( @UpdateForV9 // TODO call super(in) instead of explicitly reading superclass contents once bwc no longer needed public Request(StreamInput in) throws IOException { + super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); if (in.getTransportVersion().isPatchFrom(TransportVersions.V_8_13_4) || in.getTransportVersion().isPatchFrom(TransportVersions.SHUTDOWN_REQUEST_TIMEOUTS_FIX_8_14) || in.getTransportVersion().onOrAfter(TransportVersions.SHUTDOWN_REQUEST_TIMEOUTS_FIX)) { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/rollup/get_jobs.yml index bd40e29d0b675..671fb24715631 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -98,129 +98,3 @@ setup: - match: jobs: [] - ---- -"Test get all jobs": - - - skip: - awaits_fix: "Job ordering isn't guaranteed right now, cannot test" - - - do: - headers: - Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser - rollup.put_job: - id: foo - body: > - { - "index_pattern": "foo", - "rollup_index": "foo_rollup", - "cron": "*/30 * * * * ?", - "page_size" :10, - "groups" : { - "date_histogram": { - "field": "the_field", - "calendar_interval": "1h" - } - }, - "metrics": [ - { - "field": "value_field", - "metrics": ["min", "max", "sum"] - } - ] - } - - is_true: acknowledged - - - do: - headers: - Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser - rollup.put_job: - id: bar - body: > - { - "index_pattern": "bar", - "rollup_index": "foo_rollup", - "cron": "*/30 * * * * ?", - "page_size" :10, - "groups" : { - "date_histogram": { - "field": "the_field", - "calendar_interval": "1h" - } - }, - "metrics": [ - { - "field": "value_field", - "metrics": ["min", "max", "sum"] - } - ] - } - - is_true: acknowledged - - - do: - rollup.get_jobs: - id: "_all" - - - length: { jobs: 2 } - - match: - jobs: - - config: - id: "foo" - index_pattern: "foo" - rollup_index: "foo_rollup" - cron: "*/30 * * * * ?" - page_size: 10 - groups : - date_histogram: - calendar_interval: "1h" - field: "the_field" - time_zone: "UTC" - metrics: - - field: "value_field" - metrics: - - "min" - - "max" - - "sum" - timeout: "20s" - stats: - pages_processed: 0 - documents_processed: 0 - rollups_indexed: 0 - trigger_count: 0 - status: - job_state: "stopped" - - config: - id: "bar" - index_pattern: "bar" - rollup_index: "foo_rollup" - cron: "*/30 * * * * ?" - page_size: 10 - groups : - date_histogram: - calendar_interval: "1h" - field: "the_field" - time_zone: "UTC" - metrics: - - field: "value_field" - metrics: - - "min" - - "max" - - "sum" - timeout: "20s" - stats: - pages_processed: 0 - documents_processed: 0 - rollups_indexed: 0 - trigger_count: 0 - search_failures: 0 - index_failures: 0 - index_time_in_ms: 0 - index_total: 0 - search_time_in_ms: 0 - search_total: 0 - processing_time_in_ms: 0 - processing_total: 0 - status: - job_state: "stopped" - - diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeAction.java index 481fe40a764a6..177f00c704c3c 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeAction.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.transform.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -34,6 +36,7 @@ public class TransportGetCheckpointNodeAction extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportGetCheckpointNodeAction.class); private final IndicesService indicesService; @Inject @@ -83,17 +86,27 @@ protected static void getGlobalCheckpoints( return; } } - final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - final IndexShard indexShard = indexService.getShard(shardId.id()); - checkpointsByIndexOfThisNode.computeIfAbsent(shardId.getIndexName(), k -> { - long[] seqNumbers = new long[indexService.getIndexSettings().getNumberOfShards()]; - Arrays.fill(seqNumbers, SequenceNumbers.UNASSIGNED_SEQ_NO); - return seqNumbers; - }); - checkpointsByIndexOfThisNode.get(shardId.getIndexName())[shardId.getId()] = indexShard.seqNoStats().getGlobalCheckpoint(); - ++numProcessedShards; + try { + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.id()); + + checkpointsByIndexOfThisNode.computeIfAbsent(shardId.getIndexName(), k -> { + long[] seqNumbers = new 
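// Sketch of the per-index bookkeeping used in getGlobalCheckpoints above (the helper
// name and parameters are hypothetical; requires java.util.Arrays, java.util.Map and
// org.elasticsearch.index.seqno.SequenceNumbers): the first shard seen for an index
// lazily allocates a seq-no array sized to the index's shard count, pre-filled with
// UNASSIGNED_SEQ_NO so slots for shards not processed on this node remain clearly unset.
static void recordCheckpoint(Map<String, long[]> byIndex, String index, int shardCount, int shard, long globalCheckpoint) {
    long[] slots = byIndex.computeIfAbsent(index, k -> {
        long[] seqNumbers = new long[shardCount];
        Arrays.fill(seqNumbers, SequenceNumbers.UNASSIGNED_SEQ_NO);
        return seqNumbers;
    });
    slots[shard] = globalCheckpoint; // record this shard's global checkpoint
}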
long[indexService.getIndexSettings().getNumberOfShards()]; + Arrays.fill(seqNumbers, SequenceNumbers.UNASSIGNED_SEQ_NO); + return seqNumbers; + }); + checkpointsByIndexOfThisNode.get(shardId.getIndexName())[shardId.getId()] = indexShard.seqNoStats().getGlobalCheckpoint(); + ++numProcessedShards; + } catch (Exception e) { + logger.atDebug() + .withThrowable(e) + .log("Failed to get checkpoint for shard [{}] and index [{}]", shardId.getId(), shardId.getIndexName()); + listener.onFailure(e); + return; + } } + listener.onResponse(new Response(checkpointsByIndexOfThisNode)); } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeActionTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeActionTests.java index 25c7f9efa7992..950e593165f01 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeActionTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointNodeActionTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.seqno.SeqNoStats; @@ -47,6 +48,8 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -68,35 +71,9 @@ public void setUp() throws Exception { null, (TaskManager) null ); - IndexShard indexShardA0 = mock(IndexShard.class); - when(indexShardA0.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 3_000)); - IndexShard indexShardA1 = mock(IndexShard.class); - when(indexShardA1.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 3_001)); - IndexShard indexShardB0 = mock(IndexShard.class); - when(indexShardB0.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 4_000)); - IndexShard indexShardB1 = mock(IndexShard.class); - when(indexShardB1.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 4_001)); - Settings commonIndexSettings = Settings.builder() - .put(SETTING_VERSION_CREATED, 1_000_000) - .put(SETTING_NUMBER_OF_SHARDS, 2) - .put(SETTING_NUMBER_OF_REPLICAS, 1) - .build(); - IndexService indexServiceA = mock(IndexService.class); - when(indexServiceA.getIndexSettings()).thenReturn( - new IndexSettings(IndexMetadata.builder("my-index-A").settings(commonIndexSettings).build(), Settings.EMPTY) - ); - when(indexServiceA.getShard(0)).thenReturn(indexShardA0); - when(indexServiceA.getShard(1)).thenReturn(indexShardA1); - IndexService indexServiceB = mock(IndexService.class); - when(indexServiceB.getIndexSettings()).thenReturn( - new IndexSettings(IndexMetadata.builder("my-index-B").settings(commonIndexSettings).build(), Settings.EMPTY) - ); - when(indexServiceB.getShard(0)).thenReturn(indexShardB0); - when(indexServiceB.getShard(1)).thenReturn(indexShardB1); + indicesService = mock(IndicesService.class); when(indicesService.clusterService()).thenReturn(clusterService); - when(indicesService.indexServiceSafe(new Index("my-index-A", "A"))).thenReturn(indexServiceA); - 
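// Sketch (a free-standing fragment mirroring the code above) of the error-handling
// pattern this change adds: a failed index/shard lookup is logged at debug with the
// throwable attached and forwarded to the listener, so the caller observes a failure
// instead of a request that never completes.
try {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); // may throw IndexNotFoundException
    // ... read this shard's global checkpoint ...
} catch (Exception e) {
    logger.atDebug().withThrowable(e).log("Failed to get checkpoint for shard [{}]", shardId.getId());
    listener.onFailure(e); // surface the failure to the caller
    return;                // and stop processing the remaining shards
}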
when(indicesService.indexServiceSafe(new Index("my-index-B", "B"))).thenReturn(indexServiceB); task = new CancellableTask(123, "type", "action", "description", new TaskId("dummy-node:456"), Map.of()); clock = new FakeClock(Instant.now()); @@ -117,6 +94,7 @@ public void testGetGlobalCheckpointsWithHighTimeout() throws InterruptedExceptio } private void testGetGlobalCheckpointsSuccess(TimeValue timeout) throws InterruptedException { + mockIndexServiceResponse(); CountDownLatch latch = new CountDownLatch(1); SetOnce responseHolder = new SetOnce<>(); SetOnce exceptionHolder = new SetOnce<>(); @@ -136,7 +114,38 @@ private void testGetGlobalCheckpointsSuccess(TimeValue timeout) throws Interrupt assertThat(exceptionHolder.get(), is(nullValue())); } + private void mockIndexServiceResponse() { + IndexShard indexShardA0 = mock(IndexShard.class); + when(indexShardA0.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 3_000)); + IndexShard indexShardA1 = mock(IndexShard.class); + when(indexShardA1.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 3_001)); + IndexShard indexShardB0 = mock(IndexShard.class); + when(indexShardB0.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 4_000)); + IndexShard indexShardB1 = mock(IndexShard.class); + when(indexShardB1.seqNoStats()).thenReturn(new SeqNoStats(3_000, 2_000, 4_001)); + Settings commonIndexSettings = Settings.builder() + .put(SETTING_VERSION_CREATED, 1_000_000) + .put(SETTING_NUMBER_OF_SHARDS, 2) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + IndexService indexServiceA = mock(IndexService.class); + when(indexServiceA.getIndexSettings()).thenReturn( + new IndexSettings(IndexMetadata.builder("my-index-A").settings(commonIndexSettings).build(), Settings.EMPTY) + ); + when(indexServiceA.getShard(0)).thenReturn(indexShardA0); + when(indexServiceA.getShard(1)).thenReturn(indexShardA1); + IndexService indexServiceB = mock(IndexService.class); + when(indexServiceB.getIndexSettings()).thenReturn( + new IndexSettings(IndexMetadata.builder("my-index-B").settings(commonIndexSettings).build(), Settings.EMPTY) + ); + when(indexServiceB.getShard(0)).thenReturn(indexShardB0); + when(indexServiceB.getShard(1)).thenReturn(indexShardB1); + when(indicesService.indexServiceSafe(new Index("my-index-A", "A"))).thenReturn(indexServiceA); + when(indicesService.indexServiceSafe(new Index("my-index-B", "B"))).thenReturn(indexServiceB); + } + public void testGetGlobalCheckpointsFailureDueToTaskCancelled() throws InterruptedException { + mockIndexServiceResponse(); TaskCancelHelper.cancel(task, "due to apocalypse"); CountDownLatch latch = new CountDownLatch(1); @@ -156,6 +165,7 @@ public void testGetGlobalCheckpointsFailureDueToTaskCancelled() throws Interrupt } public void testGetGlobalCheckpointsFailureDueToTimeout() throws InterruptedException { + mockIndexServiceResponse(); // Move the current time past the timeout. 
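// Design note, sketched with hypothetical test names: extracting the happy-path stubbing
// into mockIndexServiceResponse() lets each test choose its own behaviour for
// indicesService.indexServiceSafe(...); success cases install the thenReturn stubs,
// while the failure case skips the helper and installs a throwing stub instead.
public void testSuccessCase() {
    mockIndexServiceResponse(); // install the thenReturn stubs for both indices
    // ... run getGlobalCheckpoints and assert a Response arrives ...
}

public void testLookupFailure() {
    when(indicesService.indexServiceSafe(any())).thenThrow(new IndexNotFoundException("some index"));
    // ... run getGlobalCheckpoints and assert the listener's onFailure fires ...
}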
clock.advanceTimeBy(Duration.ofSeconds(10)); @@ -184,4 +194,24 @@ public void testGetGlobalCheckpointsFailureDueToTimeout() throws InterruptedExce is(equalTo("Transform checkpointing timed out on node [dummy-node] after [5s] having processed [0] of [4] shards")) ); } + + public void testIndexNotFoundException() throws InterruptedException { + var expectedException = new IndexNotFoundException("some index"); + when(indicesService.indexServiceSafe(any())).thenThrow(expectedException); + + var exceptionHolder = new SetOnce(); + TransportGetCheckpointNodeAction.getGlobalCheckpoints( + indicesService, + task, + shards, + TimeValue.timeValueSeconds(5), + clock, + ActionListener.wrap(r -> { + fail("Test is meant to call the onFailure method."); + }, exceptionHolder::set) + ); + + assertNotNull("Listener's onFailure handler was not called.", exceptionHolder.get()); + assertThat(exceptionHolder.get(), sameInstance(expectedException)); + } } diff --git a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java index a07544ff68c9a..69709d638a771 100644 --- a/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java +++ b/x-pack/plugin/wildcard/src/main/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapper.java @@ -1061,5 +1061,10 @@ public void write(XContentBuilder b) throws IOException { } storedValues = emptyList(); } + + @Override + public String fieldName() { + return name(); + } } } diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java index 3d9e7f3828bc7..17363d58545c2 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java @@ -20,11 +20,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslVerificationMode; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.fixtures.smb.SmbTestContainer; import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; +import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; @@ -187,11 +190,16 @@ public void setupRoleMappings() throws Exception { Map> futures = Maps.newLinkedHashMapWithExpectedSize(content.size()); for (int i = 0; i < content.size(); i++) { final String name = "external_" + i; - final PutRoleMappingRequestBuilder builder = new PutRoleMappingRequestBuilder(client()).source( - name, - new BytesArray(content.get(i)), - XContentType.JSON - ); + final PutRoleMappingRequestBuilder builder; + try ( + XContentParser parser = 
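// Sketch of the failure-capture idiom testIndexNotFoundException uses above (the
// Response type parameter is a placeholder): ActionListener.wrap(...) turns any
// unexpected response into a test failure and stores the exception in a SetOnce,
// which also guards against onFailure firing more than once.
SetOnce<Exception> exceptionHolder = new SetOnce<>();
ActionListener<Response> listener = ActionListener.wrap(
    r -> fail("expected onFailure to be called"), // any response here is a test bug
    exceptionHolder::set                          // capture the exact exception instance
);
// ... run the action with this listener, then:
assertThat(exceptionHolder.get(), sameInstance(expectedException));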
XContentHelper.createParserNotCompressed( + LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG, + new BytesArray(content.get(i)), + XContentType.JSON + ) + ) { + builder = new PutRoleMappingRequestBuilder(client()).source(name, parser); + } futures.put(name, builder.execute()); } for (String mappingName : futures.keySet()) {
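// Sketch of the parser-first construction the LDAP realm test now uses (json and name
// are hypothetical locals): the caller creates the XContentParser itself, inside
// try-with-resources so it is always closed, and hands it to source(name, parser)
// rather than passing raw bytes plus a content type.
try (XContentParser parser = XContentHelper.createParserNotCompressed(
        LoggingDeprecationHandler.XCONTENT_PARSER_CONFIG,
        new BytesArray(json),
        XContentType.JSON)) {
    builder = new PutRoleMappingRequestBuilder(client()).source(name, parser);
}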