diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index 3283e691f121c..cb8062fef02b4 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -48,7 +48,7 @@ steps:
     timeout_in_minutes: 300
     matrix:
       setup:
-        BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"]
+        BWC_VERSION: ["7.17.20", "8.13.1", "8.14.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index 5e7c1a0960789..9992d940e3c97 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -1137,6 +1137,22 @@ steps:
     env:
       BWC_VERSION: 7.17.19

+  - label: "{{matrix.image}} / 7.17.20 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.20
+    timeout_in_minutes: 300
+    matrix:
+      setup:
+        image:
+          - rocky-8
+          - ubuntu-2004
+    agents:
+      provider: gcp
+      image: family/elasticsearch-{{matrix.image}}
+      machineType: custom-16-32768
+      buildDirectory: /dev/shm/bk
+    env:
+      BWC_VERSION: 7.17.20
+
   - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade"
     command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0
     timeout_in_minutes: 300
@@ -1873,8 +1889,8 @@ steps:
     env:
       BWC_VERSION: 8.12.2

-  - label: "{{matrix.image}} / 8.12.3 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.3
+  - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0
     timeout_in_minutes: 300
     matrix:
       setup:
@@ -1887,10 +1903,10 @@ steps:
       machineType: custom-16-32768
       buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.12.3
+      BWC_VERSION: 8.13.0

-  - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0
+  - label: "{{matrix.image}} / 8.13.1 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.1
     timeout_in_minutes: 300
     matrix:
       setup:
@@ -1903,7 +1919,7 @@ steps:
       machineType: custom-16-32768
       buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.13.0
+      BWC_VERSION: 8.13.1

   - label: "{{matrix.image}} / 8.14.0 / packaging-tests-upgrade"
     command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.14.0
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index 8e1ff14eda792..ff378477f7aa6 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -692,6 +692,16 @@ steps:
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 7.17.19
+  - label: 7.17.20 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.20#bwcTest
+    timeout_in_minutes: 300
+    agents:
+      provider: gcp
+      image: family/elasticsearch-ubuntu-2004
+      machineType: n1-standard-32
+      buildDirectory: /dev/shm/bk
+    env:
+      BWC_VERSION: 7.17.20
   - label: 8.0.0 / bwc
     command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest
     timeout_in_minutes: 300
@@ -1152,8 +1162,8 @@ steps:
       buildDirectory: /dev/shm/bk
     env:
       BWC_VERSION: 8.12.2
-  - label: 8.12.3 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.3#bwcTest
+  - label: 8.13.0 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest
     timeout_in_minutes: 300
     agents:
       provider: gcp
@@ -1161,9 +1171,9 @@ steps:
       machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.12.3
-  - label: 8.13.0 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest
+      BWC_VERSION: 8.13.0
+  - label: 8.13.1 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.1#bwcTest
     timeout_in_minutes: 300
     agents:
       provider: gcp
@@ -1171,7 +1181,7 @@ steps:
       machineType: n1-standard-32
       buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 8.13.0
+      BWC_VERSION: 8.13.1
   - label: 8.14.0 / bwc
     command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.14.0#bwcTest
     timeout_in_minutes: 300
@@ -1246,7 +1256,7 @@ steps:
       setup:
         ES_RUNTIME_JAVA:
           - openjdk17
-        BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"]
+        BWC_VERSION: ["7.17.20", "8.13.1", "8.14.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
@@ -1290,7 +1300,7 @@ steps:
           - openjdk17
          - openjdk21
          - openjdk22
-        BWC_VERSION: ["7.17.19", "8.12.3", "8.13.0", "8.14.0"]
+        BWC_VERSION: ["7.17.20", "8.13.1", "8.14.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 8b454fa92ab02..a655b5a862683 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -68,6 +68,7 @@ BWC_VERSION:
   - "7.17.17"
   - "7.17.18"
   - "7.17.19"
+  - "7.17.20"
   - "8.0.0"
   - "8.0.1"
   - "8.1.0"
@@ -114,6 +115,6 @@ BWC_VERSION:
   - "8.12.0"
   - "8.12.1"
   - "8.12.2"
-  - "8.12.3"
   - "8.13.0"
+  - "8.13.1"
   - "8.14.0"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index d85a432684495..f31603772a7f7 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,5 +1,4 @@
 BWC_VERSION:
-  - "7.17.19"
-  - "8.12.3"
-  - "8.13.0"
+  - "7.17.20"
+  - "8.13.1"
   - "8.14.0"
diff --git a/branches.json b/branches.json
index dc72956c13f80..772693505b9e0 100644
--- a/branches.json
+++ b/branches.json
@@ -7,9 +7,6 @@
   {
     "branch": "8.13"
   },
-  {
-    "branch": "8.12"
-  },
   {
     "branch": "7.17"
   }
diff --git a/docs/changelog/100740.yaml b/docs/changelog/100740.yaml
deleted file mode 100644
index c93fbf676ef81..0000000000000
--- a/docs/changelog/100740.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100740
-summary: "ESQL: Referencing expressions that contain backticks requires <>."
-area: ES|QL
-type: enhancement
-issues:
- - 100312
diff --git a/docs/changelog/100813.yaml b/docs/changelog/100813.yaml
deleted file mode 100644
index 476098b62c106..0000000000000
--- a/docs/changelog/100813.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 100813
-summary: Make `ParentTaskAssigningClient.getRemoteClusterClient` method also return
-  `ParentTaskAssigningClient`
-area: Infra/Transport API
-type: enhancement
-issues: []
diff --git a/docs/changelog/101209.yaml b/docs/changelog/101209.yaml
deleted file mode 100644
index debec27e61307..0000000000000
--- a/docs/changelog/101209.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101209
-summary: "Making `k` and `num_candidates` optional for knn search"
-area: Vector Search
-type: enhancement
-issues:
- - 97533
diff --git a/docs/changelog/101487.yaml b/docs/changelog/101487.yaml
deleted file mode 100644
index b4531f7fd6f75..0000000000000
--- a/docs/changelog/101487.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101487
-summary: Wait for async searches to finish when shutting down
-area: Infra/Node Lifecycle
-type: enhancement
-issues: []
diff --git a/docs/changelog/101640.yaml b/docs/changelog/101640.yaml
deleted file mode 100644
index 6f61a3a3ffd84..0000000000000
--- a/docs/changelog/101640.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101640
-summary: Support cross clusters query in ESQL
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/101656.yaml b/docs/changelog/101656.yaml
deleted file mode 100644
index 7cd4f30cae849..0000000000000
--- a/docs/changelog/101656.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101656
-summary: Adjust interception of requests for specific shard IDs
-area: Authorization
-type: bug
-issues: []
diff --git a/docs/changelog/101717.yaml b/docs/changelog/101717.yaml
deleted file mode 100644
index 7e97ef1049f88..0000000000000
--- a/docs/changelog/101717.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 101717
-summary: Pause shard snapshots on graceful shutdown
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/101872.yaml b/docs/changelog/101872.yaml
deleted file mode 100644
index 1c63c2d8b009a..0000000000000
--- a/docs/changelog/101872.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 101872
-summary: "Add `require_data_stream` parameter to indexing requests to enforce indexing operations target a data stream"
-area: Data streams
-type: feature
-issues:
- - 97032
diff --git a/docs/changelog/102078.yaml b/docs/changelog/102078.yaml
deleted file mode 100644
index d031aa0dbf6f7..0000000000000
--- a/docs/changelog/102078.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102078
-summary: Derive expected replica size from primary
-area: Allocation
-type: enhancement
-issues: []
diff --git a/docs/changelog/102207.yaml b/docs/changelog/102207.yaml
deleted file mode 100644
index 8b247828845f4..0000000000000
--- a/docs/changelog/102207.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 102207
-summary: Fix disk computation when initializing unassigned shards in desired balance
-  computation
-area: Allocation
-type: bug
-issues: []
diff --git a/docs/changelog/102371.yaml b/docs/changelog/102371.yaml
deleted file mode 100644
index 5a698bc9d671a..0000000000000
--- a/docs/changelog/102371.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102371
-summary: Adding threadpool metrics
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/102428.yaml b/docs/changelog/102428.yaml
deleted file mode 100644
index 275492fa6a888..0000000000000
--- a/docs/changelog/102428.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102428
-summary: "ESQL: Add option to drop null fields"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/102435.yaml b/docs/changelog/102435.yaml
deleted file mode 100644
index e8905b08f1adc..0000000000000
--- a/docs/changelog/102435.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102435
-summary: S3 first byte latency metric
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/102557.yaml b/docs/changelog/102557.yaml
deleted file mode 100644
index dfca1763064d4..0000000000000
--- a/docs/changelog/102557.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102557
-summary: Metrics for search latencies
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/102559.yaml b/docs/changelog/102559.yaml
deleted file mode 100644
index ad0867ab087b9..0000000000000
--- a/docs/changelog/102559.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102559
-summary: "Prune unnecessary information from TransportNodesStatsAction.NodeStatsRequest"
-area: Network
-type: enhancement
-issues: [100878]
diff --git a/docs/changelog/102584.yaml b/docs/changelog/102584.yaml
deleted file mode 100644
index 44ff5dd9f7461..0000000000000
--- a/docs/changelog/102584.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102584
-summary: Expose some ML metrics via APM
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/102726.yaml b/docs/changelog/102726.yaml
deleted file mode 100644
index bc5b311481123..0000000000000
--- a/docs/changelog/102726.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102726
-summary: Resolve Cluster API
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/102759.yaml b/docs/changelog/102759.yaml
deleted file mode 100644
index 1c002ef2b678e..0000000000000
--- a/docs/changelog/102759.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 102759
-summary: Close rather than stop `HttpServerTransport` on shutdown
-area: Infra/Node Lifecycle
-type: bug
-issues:
- - 102501
diff --git a/docs/changelog/102765.yaml b/docs/changelog/102765.yaml
deleted file mode 100644
index eb73da2650542..0000000000000
--- a/docs/changelog/102765.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102765
-summary: "Add APM metrics to `HealthPeriodicLogger`"
-area: Health
-type: enhancement
-issues: []
diff --git a/docs/changelog/102782.yaml b/docs/changelog/102782.yaml
deleted file mode 100644
index ed0a004765859..0000000000000
--- a/docs/changelog/102782.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102782
-summary: Upgrade to Lucene 9.9.0
-area: Search
-type: upgrade
-issues: []
diff --git a/docs/changelog/102798.yaml b/docs/changelog/102798.yaml
deleted file mode 100644
index 986ad99f96a19..0000000000000
--- a/docs/changelog/102798.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102798
-summary: Hot-reloadable remote cluster credentials
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/102824.yaml b/docs/changelog/102824.yaml
deleted file mode 100644
index 21b39a4c3999d..0000000000000
--- a/docs/changelog/102824.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102824
-summary: Change detection aggregation improvements
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/102862.yaml b/docs/changelog/102862.yaml
deleted file mode 100644
index bb453163009d5..0000000000000
--- a/docs/changelog/102862.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102862
-summary: Add optional pruning configuration (weighted terms scoring) to text expansion query
-area: "Machine Learning"
-type: enhancement
-issues: []
diff --git a/docs/changelog/102879.yaml b/docs/changelog/102879.yaml
deleted file mode 100644
index b35d36dd0a3a9..0000000000000
--- a/docs/changelog/102879.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102879
-summary: Fix disk computation when initializing new shards
-area: Allocation
-type: bug
-issues: []
diff --git a/docs/changelog/102885.yaml b/docs/changelog/102885.yaml
deleted file mode 100644
index 7a998c3eb1f66..0000000000000
--- a/docs/changelog/102885.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 102885
-summary: Make field limit more predictable
-area: Mapping
-type: enhancement
-issues: []
diff --git a/docs/changelog/103025.yaml b/docs/changelog/103025.yaml
deleted file mode 100644
index 856a7c022d5dd..0000000000000
--- a/docs/changelog/103025.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103025
-summary: "Metrics: Allow `AsyncCounters` to switch providers"
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/103032.yaml b/docs/changelog/103032.yaml
deleted file mode 100644
index 81d84fca0bdb0..0000000000000
--- a/docs/changelog/103032.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103032
-summary: "x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/103033.yaml b/docs/changelog/103033.yaml
deleted file mode 100644
index 30f8e182b9998..0000000000000
--- a/docs/changelog/103033.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103033
-summary: "X-pack/plugin/core: rename `double_metrics` template"
-area: Data streams
-type: enhancement
-issues: []
diff --git a/docs/changelog/103035.yaml b/docs/changelog/103035.yaml
deleted file mode 100644
index 5b1c9d6629767..0000000000000
--- a/docs/changelog/103035.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103035
-summary: "x-pack/plugin/core: add `match_mapping_type` to `ecs@mappings` dynamic templates"
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/103084.yaml b/docs/changelog/103084.yaml
deleted file mode 100644
index fb5a718a086de..0000000000000
--- a/docs/changelog/103084.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103084
-summary: Return `matched_queries` in Percolator
-area: Percolator
-type: enhancement
-issues:
- - 10163
diff --git a/docs/changelog/103091.yaml b/docs/changelog/103091.yaml
deleted file mode 100644
index ae4ac11933d4e..0000000000000
--- a/docs/changelog/103091.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103091
-summary: "Metrics: Handle null observations in observers"
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/103099.yaml b/docs/changelog/103099.yaml
deleted file mode 100644
index c3fd3f9d7b8e4..0000000000000
--- a/docs/changelog/103099.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103099
-summary: "ESQL: Simpify IS NULL/IS NOT NULL evaluation"
-area: ES|QL
-type: enhancement
-issues:
- - 103097
diff --git a/docs/changelog/103130.yaml b/docs/changelog/103130.yaml
deleted file mode 100644
index 3ef56ae84d123..0000000000000
--- a/docs/changelog/103130.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103130
-summary: Create a DSL health indicator as part of the health API
-area: Health
-type: feature
-issues: []
diff --git a/docs/changelog/103160.yaml b/docs/changelog/103160.yaml
deleted file mode 100644
index 7701aa2b4a8d4..0000000000000
--- a/docs/changelog/103160.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103160
-summary: Set thread name used by REST client
-area: Java Low Level REST Client
-type: enhancement
-issues: []
diff --git a/docs/changelog/103171.yaml b/docs/changelog/103171.yaml
deleted file mode 100644
index 95ad6a1ea77c2..0000000000000
--- a/docs/changelog/103171.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 103171
-summary: "Add `unmatch_mapping_type`, and support array of types"
-area: Mapping
-type: feature
-issues:
- - 102807
- - 102795
diff --git a/docs/changelog/103176.yaml b/docs/changelog/103176.yaml
deleted file mode 100644
index a0f46c1462f62..0000000000000
--- a/docs/changelog/103176.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103176
-summary: Validate settings in `ReloadSecureSettings` API
-area: Client
-type: bug
-issues: []
diff --git a/docs/changelog/103178.yaml b/docs/changelog/103178.yaml
deleted file mode 100644
index 5da0221a68984..0000000000000
--- a/docs/changelog/103178.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103178
-summary: Expose API key authentication metrics
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/103190.yaml b/docs/changelog/103190.yaml
deleted file mode 100644
index 5e6927d3eadd7..0000000000000
--- a/docs/changelog/103190.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103190
-summary: ILM/SLM history policies forcemerge in hot and dsl configuration
-area: ILM+SLM
-type: enhancement
-issues: []
diff --git a/docs/changelog/103223.yaml b/docs/changelog/103223.yaml
deleted file mode 100644
index c2f4c1b6a2cf4..0000000000000
--- a/docs/changelog/103223.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-pr: 103223
-summary: "[Synonyms] Mark Synonyms as GA"
-area: "Search"
-type: feature
-issues: []
-highlight:
-  title: "GA Release of Synonyms API"
-  body: |-
-    Removes the beta label for the Synonyms API to make it GA.
-  notable: true
diff --git a/docs/changelog/103232.yaml b/docs/changelog/103232.yaml
deleted file mode 100644
index b955e7abb7683..0000000000000
--- a/docs/changelog/103232.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103232
-summary: "Remove leniency in msearch parsing"
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/103300.yaml b/docs/changelog/103300.yaml
deleted file mode 100644
index a536a673b7827..0000000000000
--- a/docs/changelog/103300.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103300
-summary: Retry indefinitely for s3 indices blob read errors
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/103309.yaml b/docs/changelog/103309.yaml
deleted file mode 100644
index 94b2a31127870..0000000000000
--- a/docs/changelog/103309.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103309
-summary: Introduce lazy rollover for mapping updates in data streams
-area: Data streams
-type: enhancement
-issues:
- - 89346
diff --git a/docs/changelog/103310.yaml b/docs/changelog/103310.yaml
deleted file mode 100644
index a7a0746b6b8c4..0000000000000
--- a/docs/changelog/103310.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103310
-summary: Revert "Validate settings in `ReloadSecureSettings` API"
-area: Security
-type: bug
-issues: []
diff --git a/docs/changelog/103316.yaml b/docs/changelog/103316.yaml
deleted file mode 100644
index 47eddcc34d924..0000000000000
--- a/docs/changelog/103316.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103316
-summary: Review KEEP logic to prevent duplicate column names
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/103325.yaml b/docs/changelog/103325.yaml
deleted file mode 100644
index 7de6c41986490..0000000000000
--- a/docs/changelog/103325.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103325
-summary: Added Duplicate Word Check Feature to Analysis Nori
-area: Search
-type: feature
-issues:
- - 103321
diff --git a/docs/changelog/103340.yaml b/docs/changelog/103340.yaml
deleted file mode 100644
index 21280dbfc857d..0000000000000
--- a/docs/changelog/103340.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103340
-summary: Avoid humongous blocks
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103387.yaml b/docs/changelog/103387.yaml
deleted file mode 100644
index 77239fb9a3778..0000000000000
--- a/docs/changelog/103387.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103387
-summary: Upgrade to Lucene 9.9.1
-area: Search
-type: upgrade
-issues: []
diff --git a/docs/changelog/103398.yaml b/docs/changelog/103398.yaml
deleted file mode 100644
index 69452616ddc99..0000000000000
--- a/docs/changelog/103398.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103398
-summary: ES|QL Async Query API
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103399.yaml b/docs/changelog/103399.yaml
deleted file mode 100644
index 440ac90b313f5..0000000000000
--- a/docs/changelog/103399.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103399
-summary: "add validation on _id field when upsert new doc"
-area: Search
-type: bug
-issues:
- - 102981
diff --git a/docs/changelog/103434.yaml b/docs/changelog/103434.yaml
deleted file mode 100644
index 56af604fe08f7..0000000000000
--- a/docs/changelog/103434.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-pr: 103434
-summary: Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours.
-area: TSDB
-type: breaking
-issues: []
-breaking:
-  title: Lower the `look_ahead_time` index setting's max value
-  area: Index setting
-  details: "Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours."
-  impact: "Any value between 2 hours and 7 days will be as a look ahead time of 2 hours is defined"
-  notable: false
diff --git a/docs/changelog/103453.yaml b/docs/changelog/103453.yaml
deleted file mode 100644
index 4b7dab77c8b23..0000000000000
--- a/docs/changelog/103453.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103453
-summary: Add expiration time to update api key api
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/103461.yaml b/docs/changelog/103461.yaml
deleted file mode 100644
index 3a1bf30aa90c9..0000000000000
--- a/docs/changelog/103461.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103461
-summary: Add support for Well Known Binary (WKB) in the fields API for spatial fields
-area: Geo
-type: enhancement
-issues: []
diff --git a/docs/changelog/103481.yaml b/docs/changelog/103481.yaml
deleted file mode 100644
index f7c7c0b6eecc9..0000000000000
--- a/docs/changelog/103481.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103481
-summary: Redirect failed ingest node operations to a failure store when available
-area: Data streams
-type: feature
-issues: []
diff --git a/docs/changelog/103510.yaml b/docs/changelog/103510.yaml
deleted file mode 100644
index 50ec8efd5c440..0000000000000
--- a/docs/changelog/103510.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103510
-summary: "ES|QL: better management of exact subfields for TEXT fields"
-area: ES|QL
-type: bug
-issues:
- - 99899
diff --git a/docs/changelog/103520.yaml b/docs/changelog/103520.yaml
deleted file mode 100644
index 0ef7124eb1ed2..0000000000000
--- a/docs/changelog/103520.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103520
-summary: Request indexing memory pressure in APM node metrics publisher
-area: Distributed
-type: bug
-issues: []
diff --git a/docs/changelog/103535.yaml b/docs/changelog/103535.yaml
deleted file mode 100644
index 80cf6e1ea709a..0000000000000
--- a/docs/changelog/103535.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103535
-summary: Add replay diagnostic dir to system jvm options
-area: Infra/CLI
-type: enhancement
-issues: []
diff --git a/docs/changelog/103538.yaml b/docs/changelog/103538.yaml
deleted file mode 100644
index 5aaed771d5ee4..0000000000000
--- a/docs/changelog/103538.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103538
-summary: "ESQL: Improve pushdown of certain filters"
-area: ES|QL
-type: bug
-issues:
- - 103536
diff --git a/docs/changelog/103555.yaml b/docs/changelog/103555.yaml
deleted file mode 100644
index 2b0dc2692e252..0000000000000
--- a/docs/changelog/103555.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103555
-summary: "[Security Solution] Allow write permission for `kibana_system` role on endpoint\
-  \ response index"
-area: Authorization
-type: enhancement
-issues: []
diff --git a/docs/changelog/103592.yaml b/docs/changelog/103592.yaml
deleted file mode 100644
index 21e06f1f5a10d..0000000000000
--- a/docs/changelog/103592.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103592
-summary: Remove deprecated Block APIs
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103610.yaml b/docs/changelog/103610.yaml
deleted file mode 100644
index 1ed38cc2822bd..0000000000000
--- a/docs/changelog/103610.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103610
-summary: "ESQL: allow `null` in date math"
-area: ES|QL
-type: bug
-issues:
- - 103085
diff --git a/docs/changelog/103627.yaml b/docs/changelog/103627.yaml
deleted file mode 100644
index 4b0d9e937542e..0000000000000
--- a/docs/changelog/103627.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103627
-summary: Add gradle tasks and code to modify and access mappings between version ids and release versions
-area: Infra/Core
-type: feature
-issues: []
diff --git a/docs/changelog/103628.yaml b/docs/changelog/103628.yaml
deleted file mode 100644
index 42259c7bcde46..0000000000000
--- a/docs/changelog/103628.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103628
-summary: Add ES|QL async delete API
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103632.yaml b/docs/changelog/103632.yaml
deleted file mode 100644
index 1d83c6528f371..0000000000000
--- a/docs/changelog/103632.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103632
-summary: "ESQL: Check field exists before load from `_source`"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103633.yaml b/docs/changelog/103633.yaml
deleted file mode 100644
index 9e36451caafd8..0000000000000
--- a/docs/changelog/103633.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103633
-summary: Update s3 latency metric to use micros
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/103643.yaml b/docs/changelog/103643.yaml
deleted file mode 100644
index 966fb57acf566..0000000000000
--- a/docs/changelog/103643.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103643
-summary: "[Profiling] Use shard request cache consistently"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/103646.yaml b/docs/changelog/103646.yaml
deleted file mode 100644
index b7a6fae025771..0000000000000
--- a/docs/changelog/103646.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103646
-summary: Add index mapping parameter for `counted_keyword`
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/103648.yaml b/docs/changelog/103648.yaml
deleted file mode 100644
index d4fa489a6812c..0000000000000
--- a/docs/changelog/103648.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103648
-summary: Introduce experimental pass-through field type
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/103651.yaml b/docs/changelog/103651.yaml
deleted file mode 100644
index 1106044b31fd2..0000000000000
--- a/docs/changelog/103651.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-pr: 103651
-summary: Flag in `_field_caps` to return only fields with values in index
-area: Search
-type: enhancement
-issues: []
-highlight:
-  title: Flag in `_field_caps` to return only fields with values in index
-  body: |-
-    We added support for filtering the field capabilities API output by removing
-    fields that don't have a value. This can be done through the newly added
-    `include_empty_fields` parameter, which defaults to true.
-  notable: true
diff --git a/docs/changelog/103656.yaml b/docs/changelog/103656.yaml
deleted file mode 100644
index 24bd8814029ff..0000000000000
--- a/docs/changelog/103656.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103656
-summary: "ESQL: add =~ operator (case insensitive equality)"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/103669.yaml b/docs/changelog/103669.yaml
deleted file mode 100644
index 57361b9d842e4..0000000000000
--- a/docs/changelog/103669.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103669
-summary: Validate inference model ids
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/103673.yaml b/docs/changelog/103673.yaml
deleted file mode 100644
index f786b57eba411..0000000000000
--- a/docs/changelog/103673.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103673
-summary: "ESQL: Infer not null for aggregated fields"
-area: ES|QL
-type: enhancement
-issues:
- - 102787
diff --git a/docs/changelog/103681.yaml b/docs/changelog/103681.yaml
deleted file mode 100644
index bba73c8e3a7d4..0000000000000
--- a/docs/changelog/103681.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103681
-summary: "ESQL: Expand shallow copy with vecs"
-area: ES|QL
-type: enhancement
-issues:
- - 100528
diff --git a/docs/changelog/103682.yaml b/docs/changelog/103682.yaml
deleted file mode 100644
index 109e77dd053a5..0000000000000
--- a/docs/changelog/103682.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103682
-summary: Use deduced mappings for determining proper fields' format even if `deduce_mappings==false`
-area: Transform
-type: bug
-issues:
- - 103115
diff --git a/docs/changelog/103698.yaml b/docs/changelog/103698.yaml
deleted file mode 100644
index d94b70b54e505..0000000000000
--- a/docs/changelog/103698.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103698
-summary: Reading points from source to reduce precision loss
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103710.yaml b/docs/changelog/103710.yaml
deleted file mode 100644
index 539b9f553ccc2..0000000000000
--- a/docs/changelog/103710.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103710
-summary: List hidden shard stores by default
-area: Store
-type: enhancement
-issues: []
diff --git a/docs/changelog/103720.yaml b/docs/changelog/103720.yaml
deleted file mode 100644
index e0ee879988fa7..0000000000000
--- a/docs/changelog/103720.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103720
-summary: Add "step":"ERROR" to ILM explain response for missing policy
-area: ILM+SLM
-type: enhancement
-issues:
- - 99030
diff --git a/docs/changelog/103727.yaml b/docs/changelog/103727.yaml
deleted file mode 100644
index f943ee7906d58..0000000000000
--- a/docs/changelog/103727.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103727
-summary: "ESQL: Track the rest of `DocVector`"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103763.yaml b/docs/changelog/103763.yaml
deleted file mode 100644
index e4d6556c77077..0000000000000
--- a/docs/changelog/103763.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103763
-summary: Ref count search response bytes
-area: Search
-type: enhancement
-issues:
- - 102657
diff --git a/docs/changelog/103783.yaml b/docs/changelog/103783.yaml
deleted file mode 100644
index 47c32dd639310..0000000000000
--- a/docs/changelog/103783.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103783
-summary: "[Profiling] Mark all templates as managed"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/103807.yaml b/docs/changelog/103807.yaml
deleted file mode 100644
index 3849edcc00ced..0000000000000
--- a/docs/changelog/103807.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103807
-summary: "ESQL: Add single value checks on LIKE/RLIKE pushdown"
-area: ES|QL
-type: bug
-issues:
- - 103806
diff --git a/docs/changelog/103821.yaml b/docs/changelog/103821.yaml
deleted file mode 100644
index 3279059acbe3e..0000000000000
--- a/docs/changelog/103821.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103821
-summary: "ESQL: Delay finding field load infrastructure"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103846.yaml b/docs/changelog/103846.yaml
deleted file mode 100644
index 0d34efabc0278..0000000000000
--- a/docs/changelog/103846.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103846
-summary: Support sampling in `counted_terms` aggregation
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/103898.yaml b/docs/changelog/103898.yaml
deleted file mode 100644
index 73d89e49e8812..0000000000000
--- a/docs/changelog/103898.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-pr: 103898
-summary: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes.
-area: TSDB
-type: breaking
-issues: []
-breaking:
-  title: Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes.
-  area: Index setting
-  details: Lower the `index.look_ahead_time` index setting's max value from 2 hours to 30 minutes.
-  impact: >
-    Documents with @timestamp of 30 minutes or more in the future will be rejected.
-    Before documents with @timestamp of 2 hours or more in the future were rejected.
-    If the previous behaviour should be kept, then update the `index.look_ahead_time` setting to two hours before performing the upgrade.
-  notable: false
diff --git a/docs/changelog/103903.yaml b/docs/changelog/103903.yaml
deleted file mode 100644
index c2e5e710ac439..0000000000000
--- a/docs/changelog/103903.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103903
-summary: Account for reserved disk size
-area: Allocation
-type: enhancement
-issues: []
diff --git a/docs/changelog/103920.yaml b/docs/changelog/103920.yaml
deleted file mode 100644
index c4a0d3b06fc82..0000000000000
--- a/docs/changelog/103920.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103920
-summary: Use search to determine if cluster contains data
-area: Application
-type: bug
-issues: []
diff --git a/docs/changelog/103922.yaml b/docs/changelog/103922.yaml
deleted file mode 100644
index 4181a6e6b1e8a..0000000000000
--- a/docs/changelog/103922.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103922
-summary: Always test for spikes and dips as well as changes in the change point aggregation
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/103928.yaml b/docs/changelog/103928.yaml
deleted file mode 100644
index a9e60ba33a686..0000000000000
--- a/docs/changelog/103928.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103928
-summary: "ESQL: `MV_FIRST` and `MV_LAST`"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/103948.yaml b/docs/changelog/103948.yaml
deleted file mode 100644
index 3247183fc97bb..0000000000000
--- a/docs/changelog/103948.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 103948
-summary: '''elasticsearch-certutil cert'' now verifies the issuing chain of the generated
-  certificate'
-area: TLS
-type: enhancement
-issues: []
diff --git a/docs/changelog/103949.yaml b/docs/changelog/103949.yaml
deleted file mode 100644
index 96bd76d89ceae..0000000000000
--- a/docs/changelog/103949.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103949
-summary: "ESQL: Introduce mode setting for ENRICH"
-area: ES|QL
-type: feature
-issues: []
diff --git a/docs/changelog/103959.yaml b/docs/changelog/103959.yaml
deleted file mode 100644
index 4c8b4413b95f8..0000000000000
--- a/docs/changelog/103959.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103959
-summary: Add `ApiKey` expiration time to audit log
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/103973.yaml b/docs/changelog/103973.yaml
deleted file mode 100644
index f3bde76c7a559..0000000000000
--- a/docs/changelog/103973.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103973
-summary: Add stricter validation for api key expiration time
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/103996.yaml b/docs/changelog/103996.yaml
deleted file mode 100644
index 699b93fff4f03..0000000000000
--- a/docs/changelog/103996.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 103996
-summary: Ensure unique IDs between inference models and trained model deployments
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104006.yaml b/docs/changelog/104006.yaml
deleted file mode 100644
index d840502cdefbe..0000000000000
--- a/docs/changelog/104006.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104006
-summary: Add support for more than one `inner_hit` when searching nested vectors
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/104030.yaml b/docs/changelog/104030.yaml
deleted file mode 100644
index 8fe30e6258653..0000000000000
--- a/docs/changelog/104030.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104030
-summary: Add the possibility to transform WKT to WKB directly
-area: Geo
-type: enhancement
-issues: []
diff --git a/docs/changelog/104033.yaml b/docs/changelog/104033.yaml
deleted file mode 100644
index d3e167665732c..0000000000000
--- a/docs/changelog/104033.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104033
-summary: Add Query Users API
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/104043.yaml b/docs/changelog/104043.yaml
deleted file mode 100644
index 86032e52fe208..0000000000000
--- a/docs/changelog/104043.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104043
-summary: Expose service account authentication metrics
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/104063.yaml b/docs/changelog/104063.yaml
deleted file mode 100644
index 5f59022472c75..0000000000000
--- a/docs/changelog/104063.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104063
-summary: Add serverless scopes for Connector APIs
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104077.yaml b/docs/changelog/104077.yaml
deleted file mode 100644
index 7550e7388a29d..0000000000000
--- a/docs/changelog/104077.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104077
-summary: Retry updates to model snapshot ID on job config
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/104091.yaml b/docs/changelog/104091.yaml
deleted file mode 100644
index 42609e42471f8..0000000000000
--- a/docs/changelog/104091.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-pr: 104091
-summary: "[ESQL] Remove is_nan, is_finite, and `is_infinite`"
-area: ES|QL
-type: breaking
-issues: []
-breaking:
-  title: "[ESQL] Remove is_nan, is_finite, and `is_infinite`"
-  area: REST API
-  details: Removes the functions `is_nan`, `is_finite`, and `is_infinite`.
-  impact: Attempting to use the above functions will now be a planner time error. These functions are no longer supported.
-  notable: false
diff --git a/docs/changelog/104092.yaml b/docs/changelog/104092.yaml
deleted file mode 100644
index b40637d51765e..0000000000000
--- a/docs/changelog/104092.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104092
-summary: Ingest geoip processor cache 'no results' from the database
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/104099.yaml b/docs/changelog/104099.yaml
deleted file mode 100644
index b4164896a5923..0000000000000
--- a/docs/changelog/104099.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104099
-summary: Fix `require_alias` implicit true value on presence
-area: Indices APIs
-type: bug
-issues:
- - 103945
diff --git a/docs/changelog/104113.yaml b/docs/changelog/104113.yaml
deleted file mode 100644
index 3068291606578..0000000000000
--- a/docs/changelog/104113.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104113
-summary: "X-pack/plugin/apm-data: fix `@custom` pipeline support"
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/104118.yaml b/docs/changelog/104118.yaml
deleted file mode 100644
index f5afb199bc5eb..0000000000000
--- a/docs/changelog/104118.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104118
-summary: "ESQL: add `date_diff` function"
-area: ES|QL
-type: enhancement
-issues:
- - 101942
diff --git a/docs/changelog/104122.yaml b/docs/changelog/104122.yaml
deleted file mode 100644
index a88d7499bd44e..0000000000000
--- a/docs/changelog/104122.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104122
-summary: Consider currently refreshing data in the memory usage of refresh
-area: Engine
-type: bug
-issues: []
diff --git a/docs/changelog/104132.yaml b/docs/changelog/104132.yaml
deleted file mode 100644
index 87fe94ddcfcea..0000000000000
--- a/docs/changelog/104132.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104132
-summary: Add support for the `simple_query_string` to the Query API Key API
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/104142.yaml b/docs/changelog/104142.yaml
deleted file mode 100644
index 08bf9ef759090..0000000000000
--- a/docs/changelog/104142.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104142
-summary: Expose token authentication metrics
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/104150.yaml b/docs/changelog/104150.yaml
deleted file mode 100644
index c910542dcf7f6..0000000000000
--- a/docs/changelog/104150.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104150
-summary: Correct profiled rewrite time for knn with a pre-filter
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/104155.yaml b/docs/changelog/104155.yaml
deleted file mode 100644
index 04d6a9920310a..0000000000000
--- a/docs/changelog/104155.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104155
-summary: "Updated `missingTrainedModel` message to include: you may need to create\
-  \ it"
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104182.yaml b/docs/changelog/104182.yaml
deleted file mode 100644
index b5cf10f941cc6..0000000000000
--- a/docs/changelog/104182.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104182
-summary: "Apm-data: fix `@custom` component templates"
-area: Data streams
-type: bug
-issues: []
diff --git a/docs/changelog/104200.yaml b/docs/changelog/104200.yaml
deleted file mode 100644
index bc2aa2507f0ec..0000000000000
--- a/docs/changelog/104200.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104200
-summary: Expose realms authentication metrics
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/104209.yaml b/docs/changelog/104209.yaml
deleted file mode 100644
index fabf06fb99c2e..0000000000000
--- a/docs/changelog/104209.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-pr: 104209
-summary: '`DesiredNode:` deprecate `node_version` field and make it optional (unused)
-  in current parser'
-area: Distributed
-type: deprecation
-issues: []
-deprecation:
-  title: '`DesiredNode:` deprecate `node_version` field and make it optional for the current version'
-  area: REST API
-  details: The desired_node API includes a `node_version` field to perform validation on the new node version required.
-    This kind of check is too broad, and it's better done by external logic, so it has been removed, making the
-    `node_version` field not necessary. The field will be removed in a later version.
-  impact: Users should update their usages of `desired_node` to not include the `node_version` field anymore.
diff --git a/docs/changelog/104218.yaml b/docs/changelog/104218.yaml
deleted file mode 100644
index b3051008dc47b..0000000000000
--- a/docs/changelog/104218.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104218
-summary: "Support ST_CENTROID over spatial points"
-area: "ES|QL"
-type: enhancement
-issues:
- - 104656
diff --git a/docs/changelog/104227.yaml b/docs/changelog/104227.yaml
deleted file mode 100644
index 64dcf844f23f2..0000000000000
--- a/docs/changelog/104227.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104227
-summary: Avoid wrapping searchers multiple times in mget
-area: CRUD
-type: enhancement
-issues:
- - 85069
diff --git a/docs/changelog/104230.yaml b/docs/changelog/104230.yaml
deleted file mode 100644
index 94184f64586f5..0000000000000
--- a/docs/changelog/104230.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104230
-summary: Undeploy elser when inference model deleted
-area: Machine Learning
-type: bug
-issues: []
diff --git a/docs/changelog/104265.yaml b/docs/changelog/104265.yaml
deleted file mode 100644
index 88c3d72ee81d0..0000000000000
--- a/docs/changelog/104265.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104265
-summary: Remove `hashCode` and `equals` from `OperationModeUpdateTask`
-area: ILM+SLM
-type: bug
-issues:
- - 100871
diff --git a/docs/changelog/104269.yaml b/docs/changelog/104269.yaml
deleted file mode 100644
index 8d4b0fc5d5198..0000000000000
--- a/docs/changelog/104269.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104269
-summary: "ESQL: Support loading shapes from source into WKB blocks"
-area: "ES|QL"
-type: enhancement
-issues: []
diff --git a/docs/changelog/104309.yaml b/docs/changelog/104309.yaml
deleted file mode 100644
index 4467eb6722afc..0000000000000
--- a/docs/changelog/104309.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104309
-summary: "ESQL: Add TO_UPPER and TO_LOWER functions"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/104320.yaml b/docs/changelog/104320.yaml
deleted file mode 100644
index d2b0d09070fb9..0000000000000
--- a/docs/changelog/104320.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104320
-summary: Hot-reloadable LDAP bind password
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/104334.yaml b/docs/changelog/104334.yaml
deleted file mode 100644
index ff242ee15141b..0000000000000
--- a/docs/changelog/104334.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104334
-summary: Automatically download the ELSER model when PUT in `_inference`
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104355.yaml b/docs/changelog/104355.yaml
deleted file mode 100644
index 2a100faf3c35f..0000000000000
--- a/docs/changelog/104355.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104355
-summary: Prepare enrich plan to support multi clusters
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/104356.yaml b/docs/changelog/104356.yaml
deleted file mode 100644
index e0cb2311fbfc9..0000000000000
--- a/docs/changelog/104356.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104356
-summary: "[Profiling] Extract properties faster from source"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104363.yaml b/docs/changelog/104363.yaml
deleted file mode 100644
index 9d97991ea7fab..0000000000000
--- a/docs/changelog/104363.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104363
-summary: Apply windowing and chunking to long documents
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104386.yaml b/docs/changelog/104386.yaml
deleted file mode 100644
index 41b6a17424bbd..0000000000000
--- a/docs/changelog/104386.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104386
-summary: "X-pack/plugin/apm-data: add dynamic setting for enabling template registry"
-area: Data streams
-type: enhancement
-issues:
- - 104385
diff --git a/docs/changelog/104387.yaml b/docs/changelog/104387.yaml
deleted file mode 100644
index f10084d8c4b32..0000000000000
--- a/docs/changelog/104387.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104387
-summary: "ESQL: Nested expressions inside stats command"
-area: ES|QL
-type: enhancement
-issues:
- - 99828
diff --git a/docs/changelog/104394.yaml b/docs/changelog/104394.yaml
deleted file mode 100644
index 39fbfc0c4ea28..0000000000000
--- a/docs/changelog/104394.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104394
-summary: Endpoint to find positions of Grok pattern matches
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104396.yaml b/docs/changelog/104396.yaml
deleted file mode 100644
index 586fdc1b22624..0000000000000
--- a/docs/changelog/104396.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104396
-summary: Report current master in `PeerFinder`
-area: Cluster Coordination
-type: enhancement
-issues: []
diff --git a/docs/changelog/104406.yaml b/docs/changelog/104406.yaml
deleted file mode 100644
index d26ef664abc07..0000000000000
--- a/docs/changelog/104406.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104406
-summary: Support patch transport version from 8.12
-area: Downsampling
-type: enhancement
-issues: []
diff --git a/docs/changelog/104407.yaml b/docs/changelog/104407.yaml
deleted file mode 100644
index 1ce6b6f97f580..0000000000000
--- a/docs/changelog/104407.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104407
-summary: Set read timeout for fetching IMDSv2 token
-area: Discovery-Plugins
-type: enhancement
-issues:
- - 104244
diff --git a/docs/changelog/104408.yaml b/docs/changelog/104408.yaml
deleted file mode 100644
index 7303740168ea5..0000000000000
--- a/docs/changelog/104408.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104408
-summary: Move `TransportTermsEnumAction` coordination off transport threads
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/104433.yaml b/docs/changelog/104433.yaml
deleted file mode 100644
index b3b292923e290..0000000000000
--- a/docs/changelog/104433.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104433
-summary: Added 3 automatic restarts for `pytorch_inference` processes which stop unexpectedly
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104440.yaml b/docs/changelog/104440.yaml
deleted file mode 100644
index 4242b7786f05f..0000000000000
--- a/docs/changelog/104440.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104440
-summary: Fix write index resolution when an alias is pointing to a TSDS
-area: Data streams
-type: bug
-issues:
- - 104189
diff --git a/docs/changelog/104460.yaml b/docs/changelog/104460.yaml
deleted file mode 100644
index c92acdd5cb8ad..0000000000000
--- a/docs/changelog/104460.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104460
-summary: Dyamically adjust node metrics cache expire
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/104483.yaml b/docs/changelog/104483.yaml
deleted file mode 100644
index 99917b4e8e017..0000000000000
--- a/docs/changelog/104483.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104483
-summary: Make `task_type` optional in `_inference` APIs
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104500.yaml b/docs/changelog/104500.yaml
deleted file mode 100644
index 61c45c6dde3cb..0000000000000
--- a/docs/changelog/104500.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104500
-summary: Thread pool metrics
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/104505.yaml b/docs/changelog/104505.yaml
deleted file mode 100644
index 4d0c482a88d85..0000000000000
--- a/docs/changelog/104505.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104505
-summary: "Revert \"x-pack/plugin/apm-data: download geoip DB on pipeline creation\""
-area: Ingest Node
-type: bug
-issues: []
diff --git a/docs/changelog/104529.yaml b/docs/changelog/104529.yaml
deleted file mode 100644
index 5b223a0924d86..0000000000000
--- a/docs/changelog/104529.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104529
-summary: Add rest spec for Query User API
-area: Client
-type: enhancement
-issues: []
diff --git a/docs/changelog/104553.yaml b/docs/changelog/104553.yaml
deleted file mode 100644
index e1f5c974bd74e..0000000000000
--- a/docs/changelog/104553.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104553
-summary: "ESQL: Fix a bug loading unindexed text fields"
-area: ES|QL
-type: bug
-issues: []
diff --git a/docs/changelog/104559.yaml b/docs/changelog/104559.yaml
deleted file mode 100644
index d6d030783c4cc..0000000000000
--- a/docs/changelog/104559.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104559
-summary: Adding support for Cohere inference service
-area: Machine Learning
-type: enhancement
-issues: []
diff --git a/docs/changelog/104573.yaml b/docs/changelog/104573.yaml
deleted file mode 100644
index a333bc3024772..0000000000000
--- a/docs/changelog/104573.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104573
-summary: Fix logger Strings.format calls
-area: Distributed
-type: bug
-issues: []
diff --git a/docs/changelog/104574.yaml b/docs/changelog/104574.yaml
deleted file mode 100644
index 68be002142fd9..0000000000000
--- a/docs/changelog/104574.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-pr: 104574
-summary: Deprecate `client.type`
-area: Infra/Core
-type: deprecation
-issues: []
-deprecation:
-  title: Deprecate `client.type`
-  area: Cluster and node setting
-  details: The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now deprecated and will be removed in a future release.
-  impact: Remove the `client.type` setting from `elasticsearch.yml`
diff --git a/docs/changelog/104575.yaml b/docs/changelog/104575.yaml
deleted file mode 100644
index ba17b705fca10..0000000000000
--- a/docs/changelog/104575.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104575
-summary: Introduce Alias.unwrap method
-area: "Query Languages"
-type: enhancement
-issues: []
diff --git a/docs/changelog/104581.yaml b/docs/changelog/104581.yaml
deleted file mode 100644
index 5f9b71acbfed7..0000000000000
--- a/docs/changelog/104581.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104581
-summary: Fix bogus assertion tripped by force-executed tasks
-area: Infra/Core
-type: bug
-issues:
- - 104580
diff --git a/docs/changelog/104594.yaml b/docs/changelog/104594.yaml
deleted file mode 100644
index 7729eb028f68e..0000000000000
--- a/docs/changelog/104594.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104594
-summary: Support of `match` for the Query API Key API
-area: Authentication
-type: enhancement
-issues: []
diff --git a/docs/changelog/104614.yaml b/docs/changelog/104614.yaml
deleted file mode 100644
index 9b2c25a643825..0000000000000
--- a/docs/changelog/104614.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104614
-summary: Extend `repository_integrity` health indicator for unknown and invalid repos
-area: Health
-type: enhancement
-issues:
- - 103784
diff --git a/docs/changelog/104625.yaml b/docs/changelog/104625.yaml
deleted file mode 100644
index 28951936107fb..0000000000000
--- a/docs/changelog/104625.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104625
-summary: "Add support for the `type` parameter, for sorting, to the Query API Key\
-  \ API"
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/104636.yaml b/docs/changelog/104636.yaml
deleted file mode 100644
index d74682f2eba18..0000000000000
--- a/docs/changelog/104636.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104636
-summary: Modifying request builders
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/104643.yaml b/docs/changelog/104643.yaml
deleted file mode 100644
index 5a09cd081b376..0000000000000
--- a/docs/changelog/104643.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104643
-summary: "[Connectors API] Implement update service type action"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104648.yaml b/docs/changelog/104648.yaml
deleted file mode 100644
index e8bb5fea392ac..0000000000000
--- a/docs/changelog/104648.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104648
-summary: "[Connector API] Implement update `index_name` action"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104654.yaml b/docs/changelog/104654.yaml
deleted file mode 100644
index 1d007ad39a854..0000000000000
--- a/docs/changelog/104654.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104654
-summary: "[Connectors API] Implement update native action endpoint"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104665.yaml b/docs/changelog/104665.yaml
deleted file mode 100644
index a7043cbdc9dda..0000000000000
--- a/docs/changelog/104665.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104665
-summary: Restrict usage of certain aggregations when in sort order execution is required
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/104666.yaml b/docs/changelog/104666.yaml
deleted file mode 100644
index 5009052bd5b0a..0000000000000
--- a/docs/changelog/104666.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104666
-summary: Require the name field for `inner_hits` for collapse
-area: Search
-type: bug
-issues: []
diff --git a/docs/changelog/104674.yaml b/docs/changelog/104674.yaml
deleted file mode 100644
index 12951488f89ce..0000000000000
--- a/docs/changelog/104674.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104674
-summary: "[Profiling] Speed up processing of stacktraces"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104718.yaml b/docs/changelog/104718.yaml
deleted file mode 100644
index ffe889bb28a3e..0000000000000
--- a/docs/changelog/104718.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104718
-summary: "ESQL: Fix replacement of nested expressions in aggs with multiple parameters"
-area: ES|QL
-type: bug
-issues:
- - 104706
diff --git a/docs/changelog/104721.yaml b/docs/changelog/104721.yaml
deleted file mode 100644
index 3bfe8a21646c8..0000000000000
--- a/docs/changelog/104721.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104721
-summary: Add default rollover conditions to ILM explain API response
-area: ILM+SLM
-type: enhancement
-issues:
- - 103395
diff --git a/docs/changelog/104730.yaml b/docs/changelog/104730.yaml
deleted file mode 100644
index fe5e2e157a004..0000000000000
--- a/docs/changelog/104730.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104730
-summary: "[Profiling] Support downsampling of generic events"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104750.yaml b/docs/changelog/104750.yaml
deleted file mode 100644
index 948b19a5eaaa6..0000000000000
--- a/docs/changelog/104750.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104750
-summary: "[Connectors API] Implement connector status update action"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104753.yaml b/docs/changelog/104753.yaml
deleted file mode 100644
index f95fd3da44084..0000000000000
--- a/docs/changelog/104753.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104753
-summary: Upgrade to Lucene 9.9.2
-area: Search
-type: upgrade
-issues: []
diff --git a/docs/changelog/104778.yaml b/docs/changelog/104778.yaml
deleted file mode 100644
index 7dae338efc09c..0000000000000
--- a/docs/changelog/104778.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104778
-summary: Adding a `RequestBuilder` interface
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/104784.yaml b/docs/changelog/104784.yaml
deleted file mode 100644
index 3d60222c2aa19..0000000000000
--- a/docs/changelog/104784.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104784
-summary: "Fix blob cache race, decay, time dependency"
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/104787.yaml b/docs/changelog/104787.yaml
deleted file mode 100644
index 9c4ce688ce6ad..0000000000000
--- a/docs/changelog/104787.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104787
-summary: Add troubleshooting docs link to `PeerFinder` logs
-area: Cluster Coordination
-type: enhancement
-issues: []
diff --git a/docs/changelog/104796.yaml b/docs/changelog/104796.yaml
deleted file mode 100644
index a683f9ce22d49..0000000000000
--- a/docs/changelog/104796.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104796
-summary: "ESQL: Pre-allocate rows in TopNOperator"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/104840.yaml b/docs/changelog/104840.yaml
deleted file mode 100644
index 5b7d83a966dbc..0000000000000
--- a/docs/changelog/104840.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104840
-summary: Support enrich ANY mode in cross clusters query
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/104859.yaml b/docs/changelog/104859.yaml
deleted file mode 100644
index 55e5758e31ae2..0000000000000
--- a/docs/changelog/104859.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104859
-summary: ES - document observing with rejections
-area: Infra/Core
-type: enhancement
-issues: []
diff --git a/docs/changelog/104872.yaml b/docs/changelog/104872.yaml
deleted file mode 100644
index ad70946be02ae..0000000000000
--- a/docs/changelog/104872.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104872
-summary: Add new int8_flat and flat vector index types
-area: Vector Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/104878.yaml b/docs/changelog/104878.yaml
deleted file mode 100644
index 2ae6d5c0c1da3..0000000000000
--- a/docs/changelog/104878.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104878
-summary: "Transforms: Adding basic stats API param"
-area: Transform
-type: enhancement
-issues: []
diff --git a/docs/changelog/104893.yaml b/docs/changelog/104893.yaml
deleted file mode 100644
index e4685e160f8f8..0000000000000
--- a/docs/changelog/104893.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104893
-summary: Release resources in `BestBucketsDeferringCollector` earlier
-area: Aggregations
-type: enhancement
-issues: []
diff --git a/docs/changelog/104895.yaml b/docs/changelog/104895.yaml
deleted file mode 100644
index 020dcff891f03..0000000000000
--- a/docs/changelog/104895.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104895
-summary: Aggs support for Query API Key Information API
-area: Security
-type: enhancement
-issues: []
diff --git a/docs/changelog/104905.yaml b/docs/changelog/104905.yaml
deleted file mode 100644
index 80e06dc3b0cf5..0000000000000
--- a/docs/changelog/104905.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pr: 104905
-summary: "Execute lazy rollover with an internal dedicated user #104732"
-area: Data streams
-type: bug
-issues:
- - 104732
diff --git a/docs/changelog/104909.yaml b/docs/changelog/104909.yaml
deleted file mode 100644
index 6d250c22a745a..0000000000000
--- a/docs/changelog/104909.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104909
-summary: "[Connectors API] Relax strict response parsing for get/list operations"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104911.yaml b/docs/changelog/104911.yaml
deleted file mode 100644
index 17a335337e345..0000000000000
--- a/docs/changelog/104911.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-pr: 104911
-summary: "ES|QL: Improve type validation in aggs for UNSIGNED_LONG better support\
-  \ for VERSION"
-area: ES|QL
-type: bug
-issues:
- - 102961
diff --git a/docs/changelog/104927.yaml b/docs/changelog/104927.yaml
deleted file mode 100644
index e0e098ba10b7b..0000000000000
--- a/docs/changelog/104927.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104927
-summary: Adding `ActionRequestLazyBuilder` implementation of `RequestBuilder`
-area: Ingest Node
-type: enhancement
-issues: []
diff --git a/docs/changelog/104936.yaml b/docs/changelog/104936.yaml
deleted file mode 100644
index cfa170f550681..0000000000000
--- a/docs/changelog/104936.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104936
-summary: Support enrich coordinator mode
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/104943.yaml b/docs/changelog/104943.yaml
deleted file mode 100644
index 094ce66c4f994..0000000000000
--- a/docs/changelog/104943.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104943
-summary: Fix server cli to always pass through exit code
-area: Infra/CLI
-type: bug
-issues: []
diff --git a/docs/changelog/104949.yaml b/docs/changelog/104949.yaml
deleted file mode 100644
index c2682fc911f1d..0000000000000
--- a/docs/changelog/104949.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104949
-summary: Add text_embedding inference service with multilingual-e5 and custom eland models
-area: Machine Learning
-type: enhancement
-issues: [ ]
diff --git a/docs/changelog/104958.yaml b/docs/changelog/104958.yaml
deleted file mode 100644
index 936342db03b45..0000000000000
--- a/docs/changelog/104958.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104958
-summary: "ESQL: Extend STATS command to support aggregate expressions"
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/104982.yaml b/docs/changelog/104982.yaml
deleted file mode 100644
index 62194aa68b80c..0000000000000
--- a/docs/changelog/104982.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104982
-summary: "[Connectors API] Add new field `api_key_secret_id` to Connector"
-area: Application
-type: enhancement
-issues: []
diff --git a/docs/changelog/104993.yaml b/docs/changelog/104993.yaml
deleted file mode 100644
index df9875563d5a1..0000000000000
--- a/docs/changelog/104993.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104993
-summary: Support enrich remote mode
-area: ES|QL
-type: enhancement
-issues: []
diff --git a/docs/changelog/104996.yaml b/docs/changelog/104996.yaml
deleted file mode 100644
index b94711111adfe..0000000000000
--- a/docs/changelog/104996.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 104996
-summary: "Enhancement: Metrics for Search Took Times using Action Listeners"
-area: Search
-type: enhancement
-issues: []
diff --git a/docs/changelog/105015.yaml b/docs/changelog/105015.yaml
deleted file mode 100644
index 94ffc2b0e58d5..0000000000000
--- a/docs/changelog/105015.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 105015
-summary: Modify name of threadpool metric for rejected
-area: Infra/Metrics
-type: enhancement
-issues: []
diff --git a/docs/changelog/105044.yaml b/docs/changelog/105044.yaml
deleted file mode 100644
index 5a9a11f928f98..0000000000000
--- a/docs/changelog/105044.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 105044
-summary: Expose `OperationPurpose` via `CustomQueryParameter` to s3 logs
-area: Snapshot/Restore
-type: enhancement
-issues: []
diff --git a/docs/changelog/105055.yaml b/docs/changelog/105055.yaml
deleted file mode 100644
index 0db70a6b9e558..0000000000000
--- a/docs/changelog/105055.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 105055
-summary: "Do not enable APM agent 'instrument', it's not required for manual tracing"
-area: Infra/Core
-type: bug
-issues: []
diff --git a/docs/changelog/105062.yaml b/docs/changelog/105062.yaml
deleted file mode 100644
index 928786f62381a..0000000000000
--- a/docs/changelog/105062.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-pr: 105062
-summary: Nest pass-through objects within objects
-area: TSDB
-type: enhancement
-issues: []
diff --git a/docs/changelog/105064.yaml b/docs/changelog/105064.yaml
deleted file mode 100644
index 81c62b3148f1c..0000000000000
--- a/docs/changelog/105064.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-pr: 105064
-summary: "ES|QL: remove PROJECT keyword from the grammar"
-area: ES|QL
-type: breaking
-issues: []
-breaking:
-  title: "ES|QL: remove PROJECT keyword from the grammar"
-  area: REST API
-  details: "Removes the PROJECT keyword (an alias for KEEP) from ES|QL grammar"
-  impact: "Before this change, users could use PROJECT as an alias for KEEP in ESQL queries,\
-    \ (eg.
'FROM idx | PROJECT name, surname')\ - \ the parser replaced PROJECT with KEEP, emitted a warning:\ - \ 'PROJECT command is no longer supported, please use KEEP instead'\ - \ and the query was executed normally.\ - \ With this change, PROJECT command is no longer recognized by the query parser;\ - \ queries using PROJECT command now return a parsing exception." - notable: false diff --git a/docs/changelog/105081.yaml b/docs/changelog/105081.yaml deleted file mode 100644 index efa686bd7b4a4..0000000000000 --- a/docs/changelog/105081.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105081 -summary: For empty mappings use a `LocalRelation` -area: ES|QL -type: bug -issues: - - 104809 diff --git a/docs/changelog/105088.yaml b/docs/changelog/105088.yaml deleted file mode 100644 index 8b5d1fa7f9e02..0000000000000 --- a/docs/changelog/105088.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105088 -summary: "ESQL: Speed up reading many nulls" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105089.yaml b/docs/changelog/105089.yaml deleted file mode 100644 index 6f43c58af8a41..0000000000000 --- a/docs/changelog/105089.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105089 -summary: Return results in order -area: Transform -type: bug -issues: - - 104847 diff --git a/docs/changelog/105103.yaml b/docs/changelog/105103.yaml deleted file mode 100644 index 599d2e3666e4b..0000000000000 --- a/docs/changelog/105103.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105103 -summary: Do not record s3 http request time when it is not available -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/105105.yaml b/docs/changelog/105105.yaml deleted file mode 100644 index 848a9637d1388..0000000000000 --- a/docs/changelog/105105.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105105 -summary: Add s3 `HeadObject` request to request stats -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/105131.yaml b/docs/changelog/105131.yaml deleted file mode 100644 index 36993527da583..0000000000000 --- a/docs/changelog/105131.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105131 -summary: "[Connector API] Support filtering by name, index name in list action" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105150.yaml b/docs/changelog/105150.yaml deleted file mode 100644 index d9fc3d337f952..0000000000000 --- a/docs/changelog/105150.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105150 -summary: Remove `SearchException` usages without a proper status code -area: Search -type: bug -issues: [] diff --git a/docs/changelog/105163.yaml b/docs/changelog/105163.yaml deleted file mode 100644 index f28bf4de14792..0000000000000 --- a/docs/changelog/105163.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105163 -summary: Add stable `ThreadPool` constructor to `LogstashInternalBridge` -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/105164.yaml b/docs/changelog/105164.yaml deleted file mode 100644 index 7affb0911bc6d..0000000000000 --- a/docs/changelog/105164.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105164 -summary: Remove duplicate checkpoint audits -area: Transform -type: bug -issues: - - 105106 diff --git a/docs/changelog/105178.yaml b/docs/changelog/105178.yaml deleted file mode 100644 index e8fc9cfd6898f..0000000000000 --- a/docs/changelog/105178.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105178 -summary: "[Connector API] Support filtering connectors by service type and a query" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105180.yaml 
b/docs/changelog/105180.yaml deleted file mode 100644 index ac7ed20f151b7..0000000000000 --- a/docs/changelog/105180.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105180 -summary: Use new `ignore_dynamic_beyond_limit` in logs and metric data streams -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/105192.yaml b/docs/changelog/105192.yaml deleted file mode 100644 index b15d58ef40fe7..0000000000000 --- a/docs/changelog/105192.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105192 -summary: Allow transforms to use PIT with remote clusters again -area: Transform -type: enhancement -issues: - - 104518 diff --git a/docs/changelog/105196.yaml b/docs/changelog/105196.yaml deleted file mode 100644 index 8fe7b50cfa989..0000000000000 --- a/docs/changelog/105196.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105196 -summary: Adding a custom exception for problems with the graph of pipelines to be - applied to a document -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/105207.yaml b/docs/changelog/105207.yaml deleted file mode 100644 index 00d227248abfb..0000000000000 --- a/docs/changelog/105207.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105207 -summary: Introduce an `AggregatorReducer` to reduce the footprint of aggregations - in the coordinating node -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/105221.yaml b/docs/changelog/105221.yaml deleted file mode 100644 index 2ef64ef110d95..0000000000000 --- a/docs/changelog/105221.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 105221 -summary: "ESQL: Grammar - FROM METADATA no longer requires []" -area: ES|QL -type: breaking -issues: [] -breaking: - title: "ESQL: Grammar - FROM METADATA no longer requires []" - area: REST API - details: "Remove [ ] for METADATA option inside FROM command statements" - impact: "Previously to return metadata fields, one had to use square brackets:\ - \ (eg. 'FROM index [METADATA _index]').\ - \ This is no longer needed: the [ ] are dropped and do not have to be specified,\ - \ thus simplifying the command above to:'FROM index METADATA _index'." 
- notable: false diff --git a/docs/changelog/105223.yaml b/docs/changelog/105223.yaml deleted file mode 100644 index e2a95fcd6ba48..0000000000000 --- a/docs/changelog/105223.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105223 -summary: "x-pack/plugin/apm-data: Add a new field transaction.profiler_stack_trace_ids to traces-apm@mappings.yaml" -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/105232.yaml b/docs/changelog/105232.yaml deleted file mode 100644 index a2ad7ad9451e9..0000000000000 --- a/docs/changelog/105232.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105232 -summary: Execute SAML authentication on the generic threadpool -area: Authentication -type: bug -issues: - - 104962 diff --git a/docs/changelog/105249.yaml b/docs/changelog/105249.yaml deleted file mode 100644 index 979253e452008..0000000000000 --- a/docs/changelog/105249.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105249 -summary: "[Connector API] Support updating configuration values only" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105259.yaml b/docs/changelog/105259.yaml deleted file mode 100644 index a360bc8bc1672..0000000000000 --- a/docs/changelog/105259.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105259 -summary: Lower G1 minimum full GC interval -area: Infra/Circuit Breakers -type: enhancement -issues: [] diff --git a/docs/changelog/105265.yaml b/docs/changelog/105265.yaml deleted file mode 100644 index 70231dbfabc52..0000000000000 --- a/docs/changelog/105265.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105265 -summary: Improving the performance of the ingest simulate verbose API -area: "Ingest Node" -type: enhancement -issues: [] diff --git a/docs/changelog/105269.yaml b/docs/changelog/105269.yaml deleted file mode 100644 index acf05b05ecfc4..0000000000000 --- a/docs/changelog/105269.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105269 -summary: Reserve bytes before serializing page -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105272.yaml b/docs/changelog/105272.yaml deleted file mode 100644 index 1032a17fc10f8..0000000000000 --- a/docs/changelog/105272.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105272 -summary: "Stop the periodic health logger when es is stopping" -area: Health -type: bug -issues: [] diff --git a/docs/changelog/105273.yaml b/docs/changelog/105273.yaml deleted file mode 100644 index 83db9eac2a14a..0000000000000 --- a/docs/changelog/105273.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105273 -summary: "x-pack/plugin/core: make automatic rollovers lazy" -area: Data streams -type: enhancement -issues: - - 104083 diff --git a/docs/changelog/105289.yaml b/docs/changelog/105289.yaml deleted file mode 100644 index a51778a93beb8..0000000000000 --- a/docs/changelog/105289.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105289 -summary: "[Connector API] Change required privileges to indices:data/read(write)" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/105299.yaml b/docs/changelog/105299.yaml deleted file mode 100644 index b1f9b3ac4a2aa..0000000000000 --- a/docs/changelog/105299.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105299 -summary: Conditionally send the dimensions field as part of the openai requests -area: Machine Learning -type: enhancement -issues: - - 105005 diff --git a/docs/changelog/105325.yaml b/docs/changelog/105325.yaml deleted file mode 100644 index ab3724efca30f..0000000000000 --- a/docs/changelog/105325.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105325 -summary: "ESQL: Fix Analyzer to not interpret escaped * as a 
pattern" -area: ES|QL -type: bug -issues: - - 104955 diff --git a/docs/changelog/105334.yaml b/docs/changelog/105334.yaml deleted file mode 100644 index 498fdf4113b3c..0000000000000 --- a/docs/changelog/105334.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105334 -summary: Upgrade ANTLR4 to 4.13.1 -area: Query Languages -type: upgrade -issues: - - 102953 diff --git a/docs/changelog/105346.yaml b/docs/changelog/105346.yaml deleted file mode 100644 index 7c6eab93f6c10..0000000000000 --- a/docs/changelog/105346.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105346 -summary: Allow GET inference models by user a with read only permission -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105371.yaml b/docs/changelog/105371.yaml deleted file mode 100644 index 500c64b677a10..0000000000000 --- a/docs/changelog/105371.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105371 -summary: "ESQL: Add plan consistency verification after each optimizer" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105373.yaml b/docs/changelog/105373.yaml deleted file mode 100644 index f9d3c718f7ae3..0000000000000 --- a/docs/changelog/105373.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105373 -summary: "Fix parsing of flattened fields within subobjects: false" -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/105391.yaml b/docs/changelog/105391.yaml deleted file mode 100644 index 6b9b39c00a150..0000000000000 --- a/docs/changelog/105391.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105391 -summary: Catch all the potential exceptions in the ingest processor code -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105403.yaml b/docs/changelog/105403.yaml deleted file mode 100644 index f855c0e8ed94f..0000000000000 --- a/docs/changelog/105403.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105403 -summary: "ESQL: make `cidr_match` foldable" -area: ES|QL -type: bug -issues: - - 105376 diff --git a/docs/changelog/105427.yaml b/docs/changelog/105427.yaml deleted file mode 100644 index e73853b9dce92..0000000000000 --- a/docs/changelog/105427.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105427 -summary: Adding `executedPipelines` to the `IngestDocument` copy constructor -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/105428.yaml b/docs/changelog/105428.yaml deleted file mode 100644 index 49a80150b4303..0000000000000 --- a/docs/changelog/105428.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105428 -summary: Limiting the number of nested pipelines that can be executed -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/105429.yaml b/docs/changelog/105429.yaml deleted file mode 100644 index 706375649b7ca..0000000000000 --- a/docs/changelog/105429.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105429 -summary: Changed system auditor to use levels -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105440.yaml b/docs/changelog/105440.yaml deleted file mode 100644 index 8aacac3e641bf..0000000000000 --- a/docs/changelog/105440.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105440 -summary: Avoid false-positive matches on intermediate objects in `ecs@mappings` -area: Data streams -type: bug -issues: - - 102794 diff --git a/docs/changelog/105442.yaml b/docs/changelog/105442.yaml deleted file mode 100644 index b0af1b634d984..0000000000000 --- a/docs/changelog/105442.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105442 -summary: Handling exceptions on watcher reload -area: Watcher -type: bug -issues: - - 69842 diff --git a/docs/changelog/105458.yaml 
b/docs/changelog/105458.yaml deleted file mode 100644 index 2bab415884975..0000000000000 --- a/docs/changelog/105458.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105458 -summary: The OpenAI model parameter should be in service settings not task settings. Move the configuration field to service settings -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/105468.yaml b/docs/changelog/105468.yaml deleted file mode 100644 index 0de36a71862a4..0000000000000 --- a/docs/changelog/105468.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105468 -summary: Include better output in profiling & `toString` for automaton based queries -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/105476.yaml b/docs/changelog/105476.yaml deleted file mode 100644 index 6520df78520e7..0000000000000 --- a/docs/changelog/105476.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105476 -summary: "ESQL: Fix bug in grammar that allowed spaces inside id pattern" -area: ES|QL -type: bug -issues: - - 105441 diff --git a/docs/changelog/105486.yaml b/docs/changelog/105486.yaml deleted file mode 100644 index befdaec2301c6..0000000000000 --- a/docs/changelog/105486.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105486 -summary: Fix use-after-free at event-loop shutdown -area: Network -type: bug -issues: [] diff --git a/docs/changelog/105499.yaml b/docs/changelog/105499.yaml deleted file mode 100644 index bfc297411efa7..0000000000000 --- a/docs/changelog/105499.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105499 -summary: Fix a bug where destination index aliases are not set up for an unattended transform -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/105546.yaml b/docs/changelog/105546.yaml deleted file mode 100644 index 0b54e124f2495..0000000000000 --- a/docs/changelog/105546.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105546 -summary: '`GlobalOrdCardinalityAggregator` should use `HyperLogLogPlusPlus` instead - of `HyperLogLogPlusPlusSparse`' -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/105578.yaml b/docs/changelog/105578.yaml deleted file mode 100644 index 1ffa0128c1d0a..0000000000000 --- a/docs/changelog/105578.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 105578 -summary: Upgrade to Lucene 9.10.0 -area: Search -type: enhancement -issues: [] -highlight: - title: New Lucene 9.10 release - body: |- - - https://github.com/apache/lucene/pull/13090: Prevent humongous allocations in ScalarQuantizer when building quantiles. - - https://github.com/apache/lucene/pull/12962: Speedup concurrent multi-segment HNSW graph search - - https://github.com/apache/lucene/pull/13033: Range queries on numeric/date/ip fields now exit earlier on segments whose values don't intersect with the query range. This should especially help when there are other required clauses in the `bool` query and when the range filter is narrow, e.g. filtering on the last 5 minutes. - - https://github.com/apache/lucene/pull/13026: `bool` queries that mix `filter` and `should` clauses will now propagate minimum competitive scores through the `should` clauses. This should yield speedups when sorting by descending score. 
- notable: true diff --git a/docs/changelog/105588.yaml b/docs/changelog/105588.yaml deleted file mode 100644 index e43ff8cd75c60..0000000000000 --- a/docs/changelog/105588.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105588 -summary: '`URLRepository` should not block shutdown' -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/105593.yaml b/docs/changelog/105593.yaml deleted file mode 100644 index 4eef0d9404f42..0000000000000 --- a/docs/changelog/105593.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105593 -summary: "ESQL: push down \"[text_field] is not null\"" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/105633.yaml b/docs/changelog/105633.yaml deleted file mode 100644 index b19ec67f4602a..0000000000000 --- a/docs/changelog/105633.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105633 -summary: "[Connector API] Bugfix: support list type in filtering advenced snippet\ - \ value" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/105650.yaml b/docs/changelog/105650.yaml deleted file mode 100644 index f43da5b315f4c..0000000000000 --- a/docs/changelog/105650.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105650 -summary: "ESQL: Fix wrong attribute shadowing in pushdown rules" -area: ES|QL -type: bug -issues: - - 105434 diff --git a/docs/changelog/105691.yaml b/docs/changelog/105691.yaml deleted file mode 100644 index 89797782b06ee..0000000000000 --- a/docs/changelog/105691.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105691 -summary: "ES|QL: Disable optimizations that rely on Expression.nullable()" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/105770.yaml b/docs/changelog/105770.yaml deleted file mode 100644 index ec8ae4f380e2f..0000000000000 --- a/docs/changelog/105770.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105770 -summary: Field-caps field has value lookup use map instead of looping array -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/105772.yaml b/docs/changelog/105772.yaml deleted file mode 100644 index 73680aa04e5ab..0000000000000 --- a/docs/changelog/105772.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105772 -summary: "[ILM] Delete step deletes data stream with only one index" -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/105789.yaml b/docs/changelog/105789.yaml deleted file mode 100644 index 02a6936fa3294..0000000000000 --- a/docs/changelog/105789.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105789 -summary: Make Health API more resilient to multi-version clusters -area: Health -type: bug -issues: - - 90183 diff --git a/docs/changelog/105848.yaml b/docs/changelog/105848.yaml deleted file mode 100644 index 18291066177f6..0000000000000 --- a/docs/changelog/105848.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105848 -summary: '`ProjectOperator` should not retain references to released blocks' -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/105941.yaml b/docs/changelog/105941.yaml deleted file mode 100644 index 8e2eea1657208..0000000000000 --- a/docs/changelog/105941.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105941 -summary: Field caps performance pt2 -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/105945.yaml b/docs/changelog/105945.yaml deleted file mode 100644 index ec76faf6ef76f..0000000000000 --- a/docs/changelog/105945.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105945 -summary: "[Connector API] Fix default ordering in `SyncJob` list endpoint" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/105987.yaml b/docs/changelog/105987.yaml deleted 
file mode 100644 index d09a6907c72bf..0000000000000 --- a/docs/changelog/105987.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 105987 -summary: Fix `categorize_text` aggregation nested under empty buckets -area: Machine Learning -type: bug -issues: - - 105836 diff --git a/docs/changelog/105994.yaml b/docs/changelog/105994.yaml deleted file mode 100644 index ef9889d0a47af..0000000000000 --- a/docs/changelog/105994.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 105994 -summary: Fix bug when nested knn pre-filter might match nested docs -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/106020.yaml b/docs/changelog/106020.yaml deleted file mode 100644 index 094a43b430f89..0000000000000 --- a/docs/changelog/106020.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106020 -summary: Fix resetting a job if the original reset task no longer exists. -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/106057.yaml b/docs/changelog/106057.yaml deleted file mode 100644 index c07f658fbbf8a..0000000000000 --- a/docs/changelog/106057.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106057 -summary: Avoid computing `currentInferenceProcessors` on every cluster state -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/106060.yaml b/docs/changelog/106060.yaml deleted file mode 100644 index 2b6a47372ddd3..0000000000000 --- a/docs/changelog/106060.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106060 -summary: "[Connector API] Fix serialisation of script params in connector index service" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/106062.yaml b/docs/changelog/106062.yaml deleted file mode 100644 index f4ff3df4045e6..0000000000000 --- a/docs/changelog/106062.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106062 -summary: "During ML maintenance, reset jobs in the reset state without a corresponding\ - \ task" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/106105.yaml b/docs/changelog/106105.yaml deleted file mode 100644 index 09f80e9e71e6d..0000000000000 --- a/docs/changelog/106105.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106105 -summary: Respect --pass option in certutil csr mode -area: TLS -type: bug -issues: [] diff --git a/docs/changelog/106156.yaml b/docs/changelog/106156.yaml deleted file mode 100644 index 63232efe6e5fb..0000000000000 --- a/docs/changelog/106156.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106156 -summary: Disable parallel collection for terms aggregation with `min_doc_count` equals - to 0 -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/106288.yaml b/docs/changelog/106288.yaml deleted file mode 100644 index 0f14e53c237a1..0000000000000 --- a/docs/changelog/106288.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106288 -summary: Small time series agg improvement -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/106329.yaml b/docs/changelog/106329.yaml deleted file mode 100644 index 78e811e7987b6..0000000000000 --- a/docs/changelog/106329.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 106329 -summary: Fix Search Applications bug where deleting an alias before deleting an application intermittently caused errors -area: Application -type: bug -issues: [] diff --git a/docs/changelog/106338.yaml b/docs/changelog/106338.yaml new file mode 100644 index 0000000000000..c05826d87a11f --- /dev/null +++ b/docs/changelog/106338.yaml @@ -0,0 +1,6 @@ +pr: 106338 +summary: Text fields are stored by default in TSDB indices +area: TSDB +type: enhancement +issues: + - 97039 diff --git a/docs/changelog/106351.yaml 
b/docs/changelog/106351.yaml deleted file mode 100644 index 45868acc3a284..0000000000000 --- a/docs/changelog/106351.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106351 -summary: "Fix error on sorting unsortable `geo_point` and `cartesian_point`" -area: ES|QL -type: bug -issues: - - 106007 diff --git a/docs/changelog/106392.yaml b/docs/changelog/106392.yaml deleted file mode 100644 index ff1a0284ee5db..0000000000000 --- a/docs/changelog/106392.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106392 -summary: Resume driver when failing to fetch pages -area: ES|QL -type: bug -issues: - - 106262 diff --git a/docs/changelog/106398.yaml b/docs/changelog/106398.yaml deleted file mode 100644 index cffc5ceeb214d..0000000000000 --- a/docs/changelog/106398.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106398 -summary: Release `TranslogSnapshot` buffer after iteration -area: Engine -type: bug -issues: - - 106390 diff --git a/docs/changelog/106544.yaml b/docs/changelog/106544.yaml deleted file mode 100644 index 6557ba478126d..0000000000000 --- a/docs/changelog/106544.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106544 -summary: Force execution of `SearchService.Reaper` -area: Search -type: bug -issues: - - 106543 diff --git a/docs/changelog/106574.yaml b/docs/changelog/106574.yaml deleted file mode 100644 index 8063450bc0db1..0000000000000 --- a/docs/changelog/106574.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 106574 -summary: Fix `_reset` API when called with `force=true` on a failed transform -area: Transform -type: bug -issues: - - 106573 diff --git a/docs/changelog/106678.yaml b/docs/changelog/106678.yaml new file mode 100644 index 0000000000000..20bf12d6d4346 --- /dev/null +++ b/docs/changelog/106678.yaml @@ -0,0 +1,6 @@ +pr: 106678 +summary: Fix concurrency bug in `AbstractStringScriptFieldAutomatonQuery` +area: Search +type: bug +issues: + - 105911 diff --git a/docs/changelog/106731.yaml b/docs/changelog/106731.yaml new file mode 100644 index 0000000000000..0d8e16a8f9616 --- /dev/null +++ b/docs/changelog/106731.yaml @@ -0,0 +1,5 @@ +pr: 106731 +summary: Fix field caps and field level security +area: Security +type: bug +issues: [] diff --git a/docs/changelog/96235.yaml b/docs/changelog/96235.yaml deleted file mode 100644 index 83d1eaf74916b..0000000000000 --- a/docs/changelog/96235.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96235 -summary: Add `index.mapping.total_fields.ignore_dynamic_beyond_limit` setting to ignore dynamic fields when field limit is reached -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/99142.yaml b/docs/changelog/99142.yaml deleted file mode 100644 index 885946cec909b..0000000000000 --- a/docs/changelog/99142.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99142 -summary: Reuse number field mapper tests in other modules -area: Search -type: enhancement -issues: - - 92947 diff --git a/docs/changelog/99747.yaml b/docs/changelog/99747.yaml deleted file mode 100644 index e3e6edc585ca6..0000000000000 --- a/docs/changelog/99747.yaml +++ /dev/null @@ -1,19 +0,0 @@ -pr: 99747 -summary: Improve storage efficiency for non-metric fields in TSDB -area: TSDB -type: enhancement -issues: [] -highlight: - title: Improve storage efficiency for non-metric fields in TSDB - body: |- - Adds a new `doc_values` encoding for non-metric fields in TSDB that takes advantage of TSDB's index sorting. 
- While terms that are used in multiple documents (such as the host name) are already stored only once in the terms dictionary, - there are a lot of repetitions in the references to the terms dictionary that are stored in `doc_values` (ordinals). - In TSDB, documents (and therefore `doc_values`) are implicitly sorted by dimenstions and timestamp. - This means that for each time series, we are storing long consecutive runs of the same ordinal. - With this change, we are introducing an encoding that detects and efficiently stores runs of the same value (such as `1 1 1 2 2 2 …`), - and runs of cycling values (such as `1 2 1 2 …`). - In our testing, we have seen a reduction in storage size by about 13%. - The effectiveness of this encoding depends on how many non-metric fields, such as dimensions, are used. - The more non-metric fields, the more effective this improvement will be. - notable: true diff --git a/docs/changelog/99961.yaml b/docs/changelog/99961.yaml deleted file mode 100644 index 457f7801ce218..0000000000000 --- a/docs/changelog/99961.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99961 -summary: "ESQL: Correct out-of-range filter pushdowns" -area: ES|QL -type: bug -issues: - - 99960 diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 2a6e2f3ef8ae8..c33af69df5607 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -133,8 +133,11 @@ The following parameters are accepted by `text` fields: <>:: Whether the field value should be stored and retrievable separately from - the <> field. Accepts `true` or `false` - (default). + the <> field. Accepts `true` or `false` (default). + This parameter will be automatically set to `true` for TSDB indices + (indices that have `index.mode` set to `time_series`) + if there is no <> + sub-field that supports synthetic `_source`. <>:: diff --git a/docs/reference/migration/migrate_8_13.asciidoc b/docs/reference/migration/migrate_8_13.asciidoc index c2f431da388f1..c9e726d940b1d 100644 --- a/docs/reference/migration/migrate_8_13.asciidoc +++ b/docs/reference/migration/migrate_8_13.asciidoc @@ -16,5 +16,119 @@ coming::[8.13.0] [[breaking-changes-8.13]] === Breaking changes -There are no breaking changes in {es} 8.13. +The following changes in {es} 8.13 might affect your applications +and prevent them from operating normally. +Before upgrading to 8.13, review these changes and take the described steps +to mitigate the impact. + + +There are no notable breaking changes in {es} 8.13. +But there are some less critical breaking changes. + +[discrete] +[[breaking_813_index_setting_changes]] +==== Index setting changes + +[[change_index_look_ahead_time_index_settings_default_value_from_2_hours_to_30_minutes]] +.Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. +[%collapsible] +==== +*Details* + +Change the `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. + +*Impact* + +Documents with a @timestamp more than 30 minutes in the future will be rejected. Previously, documents with a @timestamp more than 2 hours in the future were rejected. To keep the previous behaviour, update the `index.look_ahead_time` setting to two hours before performing the upgrade.
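+
+As a minimal illustration (the template name, index pattern, and routing path
+below are placeholders rather than values taken from this change), the two-hour
+look-ahead time could be kept by setting it in the index template that manages
+the affected time series data stream, so that newly created backing indices
+retain the old behaviour:
+
+[source,console]
+----
+PUT _index_template/my-tsds-template <1>
+{
+  "index_patterns": ["metrics-example-*"],
+  "data_stream": {},
+  "template": {
+    "settings": {
+      "index.mode": "time_series",
+      "index.routing_path": ["host.name"],
+      "index.look_ahead_time": "2h"
+    }
+  }
+}
+----
+<1> Hypothetical template and pattern names; reuse the template that already manages your data stream.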
+==== + +[[lower_look_ahead_time_index_settings_max_value]] +.Lower the `look_ahead_time` index setting's max value +[%collapsible] +==== +*Details* + +Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. + +*Impact* + +Any value between 2 hours and 7 days will be treated as a look-ahead time of 2 hours. +==== + +[discrete] +[[breaking_813_rest_api_changes]] +==== REST API changes + +[[esql_grammar_from_metadata_no_longer_requires]] +.ESQL: Grammar - FROM METADATA no longer requires [] +[%collapsible] +==== +*Details* + +Remove [ ] for the METADATA option inside FROM command statements + +*Impact* + +Previously, to return metadata fields, one had to use square brackets (e.g. 'FROM index [METADATA _index]'). This is no longer needed: the [ ] are dropped and do not have to be specified, thus simplifying the command above to: 'FROM index METADATA _index'. +==== + +[[es_ql_remove_project_keyword_from_grammar]] +.ES|QL: remove PROJECT keyword from the grammar +[%collapsible] +==== +*Details* + +Removes the PROJECT keyword (an alias for KEEP) from the ES|QL grammar + +*Impact* + +Before this change, users could use PROJECT as an alias for KEEP in ESQL queries (e.g. 'FROM idx | PROJECT name, surname'); the parser replaced PROJECT with KEEP, emitted the warning 'PROJECT command is no longer supported, please use KEEP instead', and executed the query normally. With this change, the PROJECT command is no longer recognized by the query parser; queries using the PROJECT command now return a parsing exception. +==== + +[[esql_remove_nan_finite_infinite]] +.[ESQL] Remove is_nan, is_finite, and `is_infinite` +[%collapsible] +==== +*Details* + +Removes the functions `is_nan`, `is_finite`, and `is_infinite`. + +*Impact* + +Attempting to use the above functions will now result in a planner-time error. These functions are no longer supported. +==== + + +[discrete] +[[deprecated-8.13]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.13 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.13. + +To find out if you are using any deprecated functionality, +enable <>. + +[discrete] +[[deprecations_813_cluster_and_node_setting]] +==== Cluster and node setting deprecations + +[[deprecate_client_type]] +.Deprecate `client.type` +[%collapsible] +==== +*Details* + +The node setting `client.type` has been ignored since the node client was removed in 8.0. The setting is now deprecated and will be removed in a future release. + +*Impact* + +Remove the `client.type` setting from `elasticsearch.yml`.
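+
+For example, if `elasticsearch.yml` still contains the setting (the value shown
+below is illustrative), the line can simply be deleted:
+
+[source,yaml]
+----
+# Deprecated in 8.13 and ignored since 8.0; delete this line:
+client.type: transport
+----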
+==== + +[discrete] +[[deprecations_813_rest_api]] +==== REST API deprecations + +[[desirednode_deprecate_node_version_field_make_it_optional_for_current_version]] +.`DesiredNode:` deprecate `node_version` field and make it optional for the current version +[%collapsible] +==== +*Details* + +The desired_node API includes a `node_version` field that is used to validate the version of new nodes. This kind of check is too broad and is better done by external logic, so the validation has been removed, making the `node_version` field unnecessary. The field will be removed in a later version. + +*Impact* + +Users should update their usages of `desired_node` so they no longer include the `node_version` field. +==== diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc index 5b7d4f90f98de..2ef183374f167 100644 --- a/docs/reference/release-notes/8.13.0.asciidoc +++ b/docs/reference/release-notes/8.13.0.asciidoc @@ -1,8 +1,443 @@ [[release-notes-8.13.0]] == {es} version 8.13.0 -coming[8.13.0] - Also see <>. +[[breaking-8.13.0]] +[float] +=== Breaking changes + +ES|QL:: +* ESQL: Grammar - FROM METADATA no longer requires [] {es-pull}105221[#105221] +* ES|QL: remove PROJECT keyword from the grammar {es-pull}105064[#105064] +* [ESQL] Remove is_nan, is_finite, and `is_infinite` {es-pull}104091[#104091] + +TSDB:: +* Change `index.look_ahead_time` index setting's default value from 2 hours to 30 minutes. {es-pull}103898[#103898] +* Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. {es-pull}103434[#103434] + +[[bug-8.13.0]] +[float] +=== Bug fixes + +Aggregations:: +* Disable parallel collection for terms aggregation with `min_doc_count` equals to 0 {es-pull}106156[#106156] +* `GlobalOrdCardinalityAggregator` should use `HyperLogLogPlusPlus` instead of `HyperLogLogPlusPlusSparse` {es-pull}105546[#105546] + +Allocation:: +* Fix disk computation when initializing new shards {es-pull}102879[#102879] +* Fix disk computation when initializing unassigned shards in desired balance computation {es-pull}102207[#102207] + +Application:: +* Fix Search Applications bug where deleting an alias before deleting an application intermittently caused errors {es-pull}106329[#106329] +* Use search to determine if cluster contains data {es-pull}103920[#103920] +* [Connector API] Bugfix: support list type in filtering advanced snippet value {es-pull}105633[#105633] +* [Connector API] Fix default ordering in `SyncJob` list endpoint {es-pull}105945[#105945] +* [Connector API] Fix serialisation of script params in connector index service {es-pull}106060[#106060] + +Authentication:: +* Execute SAML authentication on the generic threadpool {es-pull}105232[#105232] (issue: {es-issue}104962[#104962]) + +Authorization:: +* Adjust interception of requests for specific shard IDs {es-pull}101656[#101656] + +Client:: +* Validate settings in `ReloadSecureSettings` API {es-pull}103176[#103176] + +Data streams:: +* Apm-data: fix `@custom` component templates {es-pull}104182[#104182] +* Avoid false-positive matches on intermediate objects in `ecs@mappings` {es-pull}105440[#105440] (issue: {es-issue}102794[#102794]) +* Execute lazy rollover with an internal dedicated user #104732 {es-pull}104905[#104905] (issue: {es-issue}104732[#104732]) +* Fix write index resolution when an alias is pointing to a TSDS {es-pull}104440[#104440] (issue: {es-issue}104189[#104189]) +* x-pack/plugin/core: add `match_mapping_type` to `ecs@mappings` dynamic templates {es-pull}103035[#103035] + +Distributed:: +* Fix logger Strings.format calls {es-pull}104573[#104573] +* Request indexing memory pressure in APM node metrics publisher {es-pull}103520[#103520] + +ES|QL:: +* ESQL: Add single value checks on LIKE/RLIKE pushdown {es-pull}103807[#103807] (issue: {es-issue}103806[#103806]) +* ESQL: Correct out-of-range filter pushdowns {es-pull}99961[#99961] (issue: {es-issue}99960[#99960]) +* ESQL: Fix Analyzer to not interpret escaped * as a pattern {es-pull}105325[#105325] (issue: {es-issue}104955[#104955]) +* ESQL: Fix a bug loading unindexed text fields {es-pull}104553[#104553] +* ESQL: Fix bug in grammar that allowed spaces inside id pattern {es-pull}105476[#105476] (issue: {es-issue}105441[#105441]) +* ESQL: Fix replacement of nested expressions in aggs with multiple parameters {es-pull}104718[#104718] (issue: {es-issue}104706[#104706]) +* ESQL: Fix wrong attribute shadowing in pushdown rules {es-pull}105650[#105650] (issue: {es-issue}105434[#105434]) +* ESQL: Improve pushdown of certain filters {es-pull}103538[#103538] (issue: {es-issue}103536[#103536]) +* ESQL: allow `null` in date math {es-pull}103610[#103610] (issue: {es-issue}103085[#103085]) +* ESQL: make `cidr_match` foldable {es-pull}105403[#105403] (issue: {es-issue}105376[#105376]) +* ES|QL: Disable optimizations that rely on Expression.nullable() {es-pull}105691[#105691] +* ES|QL: Improve type validation in aggs for UNSIGNED_LONG better support for VERSION {es-pull}104911[#104911] (issue: {es-issue}102961[#102961]) +* ES|QL: better management of exact subfields for TEXT fields {es-pull}103510[#103510] (issue: {es-issue}99899[#99899]) +* Fix error on sorting unsortable `geo_point` and `cartesian_point` {es-pull}106351[#106351] (issue: {es-issue}106007[#106007]) +* For empty mappings use a `LocalRelation` {es-pull}105081[#105081] (issue: {es-issue}104809[#104809]) +* Resume driver when failing to fetch pages {es-pull}106392[#106392] (issue: {es-issue}106262[#106262]) +* Review KEEP logic to prevent duplicate column names {es-pull}103316[#103316] +* `ProjectOperator` should not retain references to released blocks {es-pull}105848[#105848] + +Engine:: +* Consider currently refreshing data in the memory usage of refresh {es-pull}104122[#104122] +* Release `TranslogSnapshot` buffer after iteration {es-pull}106398[#106398] (issue: {es-issue}106390[#106390]) + +Health:: +* Make Health API more resilient to multi-version clusters {es-pull}105789[#105789] (issue: {es-issue}90183[#90183]) +* Stop the periodic health logger when es is stopping {es-pull}105272[#105272] + +ILM+SLM:: +* Remove `hashCode` and `equals` from `OperationModeUpdateTask` {es-pull}104265[#104265] (issue: {es-issue}100871[#100871]) +* [ILM] Delete step deletes data stream with only one index {es-pull}105772[#105772] + +Indices APIs:: +* Fix `require_alias` implicit true value on presence {es-pull}104099[#104099] (issue: {es-issue}103945[#103945]) + +Infra/CLI:: +* Fix server cli to always pass through exit code {es-pull}104943[#104943] + +Infra/Core:: +* Do not enable APM agent 'instrument', it's not required for manual tracing {es-pull}105055[#105055] +* Fix bogus assertion tripped by force-executed tasks {es-pull}104581[#104581] (issue: {es-issue}104580[#104580]) +* Metrics: Allow `AsyncCounters` to switch providers {es-pull}103025[#103025] +* Metrics: Handle null observations in observers {es-pull}103091[#103091] + +Infra/Node Lifecycle:: +* Close rather than stop `HttpServerTransport` on shutdown {es-pull}102759[#102759] (issue: {es-issue}102501[#102501]) + +Ingest Node:: +* Add stable `ThreadPool` constructor to `LogstashInternalBridge` {es-pull}105163[#105163] +* Adding `executedPipelines` to the `IngestDocument` copy constructor {es-pull}105427[#105427] +* Revert "x-pack/plugin/apm-data: download geoip DB on pipeline creation" {es-pull}104505[#104505] +* X-pack/plugin/apm-data: fix `@custom` pipeline support {es-pull}104113[#104113] + +Machine Learning:: +* Allow GET inference models by a user with read only permission {es-pull}105346[#105346] +* Avoid computing `currentInferenceProcessors` on every cluster state {es-pull}106057[#106057] +* Catch all the potential exceptions in the ingest processor code {es-pull}105391[#105391] +* Changed system auditor to use levels {es-pull}105429[#105429] +* During ML maintenance, reset jobs in the reset state without a corresponding task {es-pull}106062[#106062] +* Fix `categorize_text` aggregation nested under empty buckets {es-pull}105987[#105987] (issue: {es-issue}105836[#105836]) +* Fix resetting a job if the original reset task no longer
exists. {es-pull}106020[#106020] +* Retry updates to model snapshot ID on job config {es-pull}104077[#104077] +* The OpenAI model parameter should be in service settings not task settings. Move the configuration field to service settings {es-pull}105458[#105458] +* Undeploy elser when inference model deleted {es-pull}104230[#104230] + +Mapping:: +* Fix parsing of flattened fields within subobjects: false {es-pull}105373[#105373] + +Network:: +* Fix use-after-free at event-loop shutdown {es-pull}105486[#105486] + +Search:: +* Correct profiled rewrite time for knn with a pre-filter {es-pull}104150[#104150] +* Force execution of `SearchService.Reaper` {es-pull}106544[#106544] (issue: {es-issue}106543[#106543]) +* Move `TransportTermsEnumAction` coordination off transport threads {es-pull}104408[#104408] +* Remove `SearchException` usages without a proper status code {es-pull}105150[#105150] +* Require the name field for `inner_hits` for collapse {es-pull}104666[#104666] +* add validation on _id field when upsert new doc {es-pull}103399[#103399] (issue: {es-issue}102981[#102981]) + +Security:: +* Revert "Validate settings in `ReloadSecureSettings` API" {es-pull}103310[#103310] + +Snapshot/Restore:: +* Do not record s3 http request time when it is not available {es-pull}105103[#105103] +* `URLRepository` should not block shutdown {es-pull}105588[#105588] + +TLS:: +* Respect --pass option in certutil csr mode {es-pull}106105[#106105] + +Transform:: +* Fix `_reset` API when called with `force=true` on a failed transform {es-pull}106574[#106574] (issue: {es-issue}106573[#106573]) +* Fix a bug where destination index aliases are not set up for an unattended transform {es-pull}105499[#105499] +* Remove duplicate checkpoint audits {es-pull}105164[#105164] (issue: {es-issue}105106[#105106]) +* Return results in order {es-pull}105089[#105089] (issue: {es-issue}104847[#104847]) +* Use deduced mappings for determining proper fields' format even if `deduce_mappings==false` {es-pull}103682[#103682] (issue: {es-issue}103115[#103115]) + +Vector Search:: +* Fix bug when nested knn pre-filter might match nested docs {es-pull}105994[#105994] + +Watcher:: +* Handling exceptions on watcher reload {es-pull}105442[#105442] (issue: {es-issue}69842[#69842]) + +[[deprecation-8.13.0]] +[float] +=== Deprecations + +Distributed:: +* `DesiredNode:` deprecate `node_version` field and make it optional (unused) in current parser {es-pull}104209[#104209] + +Infra/Core:: +* Deprecate `client.type` {es-pull}104574[#104574] + +[[enhancement-8.13.0]] +[float] +=== Enhancements + +Aggregations:: +* Add index mapping parameter for `counted_keyword` {es-pull}103646[#103646] +* Introduce an `AggregatorReducer` to reduce the footprint of aggregations in the coordinating node {es-pull}105207[#105207] +* Release resources in `BestBucketsDeferringCollector` earlier {es-pull}104893[#104893] +* Support sampling in `counted_terms` aggregation {es-pull}103846[#103846] + +Allocation:: +* Account for reserved disk size {es-pull}103903[#103903] +* Derive expected replica size from primary {es-pull}102078[#102078] + +Application:: +* Add serverless scopes for Connector APIs {es-pull}104063[#104063] +* [Connector API] Change required privileges to indices:data/read(write) {es-pull}105289[#105289] +* [Connector API] Implement update `index_name` action {es-pull}104648[#104648] +* [Connector API] Support filtering by name, index name in list action {es-pull}105131[#105131] +* [Connector API] Support filtering connectors by service type and a query 
{es-pull}105178[#105178] +* [Connector API] Support updating configuration values only {es-pull}105249[#105249] +* [Connectors API] Add new field `api_key_secret_id` to Connector {es-pull}104982[#104982] +* [Connectors API] Implement connector status update action {es-pull}104750[#104750] +* [Connectors API] Implement update native action endpoint {es-pull}104654[#104654] +* [Connectors API] Implement update service type action {es-pull}104643[#104643] +* [Connectors API] Relax strict response parsing for get/list operations {es-pull}104909[#104909] +* [Profiling] Extract properties faster from source {es-pull}104356[#104356] +* [Profiling] Mark all templates as managed {es-pull}103783[#103783] +* [Profiling] Speed up processing of stacktraces {es-pull}104674[#104674] +* [Profiling] Support downsampling of generic events {es-pull}104730[#104730] +* [Profiling] Use shard request cache consistently {es-pull}103643[#103643] + +Authentication:: +* Expose API key authentication metrics {es-pull}103178[#103178] +* Expose realms authentication metrics {es-pull}104200[#104200] +* Expose service account authentication metrics {es-pull}104043[#104043] +* Expose token authentication metrics {es-pull}104142[#104142] +* Hot-reloadable LDAP bind password {es-pull}104320[#104320] +* Support of `match` for the Query API Key API {es-pull}104594[#104594] + +Authorization:: +* [Security Solution] Allow write permission for `kibana_system` role on endpoint response index {es-pull}103555[#103555] + +CRUD:: +* Avoid wrapping searchers multiple times in mget {es-pull}104227[#104227] (issue: {es-issue}85069[#85069]) + +Client:: +* Add rest spec for Query User API {es-pull}104529[#104529] + +Cluster Coordination:: +* Add troubleshooting docs link to `PeerFinder` logs {es-pull}104787[#104787] +* Report current master in `PeerFinder` {es-pull}104396[#104396] + +Data streams:: +* Introduce lazy rollover for mapping updates in data streams {es-pull}103309[#103309] (issue: {es-issue}89346[#89346]) +* Use new `ignore_dynamic_beyond_limit` in logs and metric data streams {es-pull}105180[#105180] +* X-pack/plugin/apm-data: add dynamic setting for enabling template registry {es-pull}104386[#104386] (issue: {es-issue}104385[#104385]) +* X-pack/plugin/core: rename `double_metrics` template {es-pull}103033[#103033] +* x-pack/plugin/apm-data: Add a new field transaction.profiler_stack_trace_ids to traces-apm@mappings.yaml {es-pull}105223[#105223] +* x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script {es-pull}103032[#103032] +* x-pack/plugin/core: make automatic rollovers lazy {es-pull}105273[#105273] (issue: {es-issue}104083[#104083]) + +Discovery-Plugins:: +* Set read timeout for fetching IMDSv2 token {es-pull}104407[#104407] (issue: {es-issue}104244[#104244]) + +Downsampling:: +* Support patch transport version from 8.12 {es-pull}104406[#104406] + +ES|QL:: +* Add ES|QL async delete API {es-pull}103628[#103628] +* Avoid humongous blocks {es-pull}103340[#103340] +* ESQL: Add TO_UPPER and TO_LOWER functions {es-pull}104309[#104309] +* ESQL: Add option to drop null fields {es-pull}102428[#102428] +* ESQL: Add plan consistency verification after each optimizer {es-pull}105371[#105371] +* ESQL: Check field exists before load from `_source` {es-pull}103632[#103632] +* ESQL: Delay finding field load infrastructure {es-pull}103821[#103821] +* ESQL: Expand shallow copy with vecs {es-pull}103681[#103681] (issue: {es-issue}100528[#100528]) +* ESQL: Extend STATS command to support aggregate 
expressions {es-pull}104958[#104958] +* ESQL: Infer not null for aggregated fields {es-pull}103673[#103673] (issue: {es-issue}102787[#102787]) +* ESQL: Nested expressions inside stats command {es-pull}104387[#104387] (issue: {es-issue}99828[#99828]) +* ESQL: Pre-allocate rows in TopNOperator {es-pull}104796[#104796] +* ESQL: Referencing expressions that contain backticks requires <>. {es-pull}100740[#100740] (issue: {es-issue}100312[#100312]) +* ESQL: Simplify IS NULL/IS NOT NULL evaluation {es-pull}103099[#103099] (issue: {es-issue}103097[#103097]) +* ESQL: Speed up reading many nulls {es-pull}105088[#105088] +* ESQL: Support loading shapes from source into WKB blocks {es-pull}104269[#104269] +* ESQL: Track the rest of `DocVector` {es-pull}103727[#103727] +* ESQL: `MV_FIRST` and `MV_LAST` {es-pull}103928[#103928] +* ESQL: add `date_diff` function {es-pull}104118[#104118] (issue: {es-issue}101942[#101942]) +* ESQL: push down "[text_field] is not null" {es-pull}105593[#105593] +* ES|QL Async Query API {es-pull}103398[#103398] +* Prepare enrich plan to support multi clusters {es-pull}104355[#104355] +* Reading points from source to reduce precision loss {es-pull}103698[#103698] +* Remove deprecated Block APIs {es-pull}103592[#103592] +* Reserve bytes before serializing page {es-pull}105269[#105269] +* Support ST_CENTROID over spatial points {es-pull}104218[#104218] (issue: {es-issue}104656[#104656]) +* Support cross clusters query in ESQL {es-pull}101640[#101640] +* Support enrich ANY mode in cross clusters query {es-pull}104840[#104840] +* Support enrich coordinator mode {es-pull}104936[#104936] +* Support enrich remote mode {es-pull}104993[#104993] + +Geo:: +* Add support for Well Known Binary (WKB) in the fields API for spatial fields {es-pull}103461[#103461] +* Add the possibility to transform WKT to WKB directly {es-pull}104030[#104030] + +Health:: +* Add APM metrics to `HealthPeriodicLogger` {es-pull}102765[#102765] +* Extend `repository_integrity` health indicator for unknown and invalid repos {es-pull}104614[#104614] (issue: {es-issue}103784[#103784]) + +ILM+SLM:: +* Add "step":"ERROR" to ILM explain response for missing policy {es-pull}103720[#103720] (issue: {es-issue}99030[#99030]) +* Add default rollover conditions to ILM explain API response {es-pull}104721[#104721] (issue: {es-issue}103395[#103395]) +* ILM/SLM history policies forcemerge in hot and dsl configuration {es-pull}103190[#103190] + +Infra/CLI:: +* Add replay diagnostic dir to system jvm options {es-pull}103535[#103535] + +Infra/Circuit Breakers:: +* Lower G1 minimum full GC interval {es-pull}105259[#105259] + +Infra/Core:: +* Adding threadpool metrics {es-pull}102371[#102371] +* ES - document observing with rejections {es-pull}104859[#104859] +* Thread pool metrics {es-pull}104500[#104500] + +Infra/Metrics:: +* Modify name of threadpool metric for rejected {es-pull}105015[#105015] + +Infra/Node Lifecycle:: +* Wait for async searches to finish when shutting down {es-pull}101487[#101487] + +Infra/Transport API:: +* Make `ParentTaskAssigningClient.getRemoteClusterClient` method also return `ParentTaskAssigningClient` {es-pull}100813[#100813] + +Ingest Node:: +* Adding `ActionRequestLazyBuilder` implementation of `RequestBuilder` {es-pull}104927[#104927] +* Adding a `RequestBuilder` interface {es-pull}104778[#104778] +* Adding a custom exception for problems with the graph of pipelines to be applied to a document {es-pull}105196[#105196] +* Improving the performance of the ingest simulate verbose API
{es-pull}105265[#105265] +* Ingest geoip processor cache 'no results' from the database {es-pull}104092[#104092] +* Limiting the number of nested pipelines that can be executed {es-pull}105428[#105428] +* Modifying request builders {es-pull}104636[#104636] + +Java Low Level REST Client:: +* Set thread name used by REST client {es-pull}103160[#103160] + +Machine Learning:: +* Add optional pruning configuration (weighted terms scoring) to text expansion query {es-pull}102862[#102862] +* Add text_embedding inference service with multilingual-e5 and custom eland models {es-pull}104949[#104949] +* Add 3 automatic restarts for `pytorch_inference` processes that stop unexpectedly {es-pull}104433[#104433] +* Add support for Cohere inference service {es-pull}104559[#104559] +* Always test for spikes and dips as well as changes in the change point aggregation {es-pull}103922[#103922] +* Apply windowing and chunking to long documents {es-pull}104363[#104363] +* Automatically download the ELSER model when PUT in `_inference` {es-pull}104334[#104334] +* Better handling of number of allocations in pytorch_inference in the case that hardware_concurrency fails {ml-pull}2607[#2607] +* Change detection aggregation improvements {es-pull}102824[#102824] +* Conditionally send the dimensions field as part of the openai requests {es-pull}105299[#105299] (issue: {es-issue}105005[#105005]) +* Endpoint to find positions of Grok pattern matches {es-pull}104394[#104394] +* Ensure unique IDs between inference models and trained model deployments {es-pull}103996[#103996] +* Expose some ML metrics via APM {es-pull}102584[#102584] +* Make `task_type` optional in `_inference` APIs {es-pull}104483[#104483] +* Update `missingTrainedModel` message to include: you may need to create it {es-pull}104155[#104155] +* Upgrade MKL to version 2024.0 on Linux x86_64 {ml-pull}2619[#2619] +* Upgrade PyTorch to version 2.1.2. 
{ml-pull}2588[#2588] +* Upgrade zlib to version 1.2.13 on Windows {ml-pull}2588[#2588] +* Use Boost.JSON for JSON processing {ml-pull}2614[#2614] +* Validate inference model ids {es-pull}103669[#103669] + + +Mapping:: +* Add `index.mapping.total_fields.ignore_dynamic_beyond_limit` setting to ignore dynamic fields when field limit is reached {es-pull}96235[#96235] +* Make field limit more predictable {es-pull}102885[#102885] + +Network:: +* Prune unnecessary information from TransportNodesStatsAction.NodeStatsRequest {es-pull}102559[#102559] (issue: {es-issue}100878[#100878]) + +Percolator:: +* Return `matched_queries` in Percolator {es-pull}103084[#103084] (issue: {es-issue}10163[#10163]) + +Query Languages:: +* Introduce Alias.unwrap method {es-pull}104575[#104575] + +Search:: +* Dynamically adjust node metrics cache expire {es-pull}104460[#104460] +* Enhancement: Metrics for Search Took Times using Action Listeners {es-pull}104996[#104996] +* Field caps performance pt2 {es-pull}105941[#105941] +* Field-caps field has value lookup use map instead of looping array {es-pull}105770[#105770] +* Flag in `_field_caps` to return only fields with values in index {es-pull}103651[#103651] +* Include better output in profiling & `toString` for automaton based queries {es-pull}105468[#105468] +* Metrics for search latencies {es-pull}102557[#102557] +* Ref count search response bytes {es-pull}103763[#103763] (issue: {es-issue}102657[#102657]) +* Remove leniency in msearch parsing {es-pull}103232[#103232] +* Resolve Cluster API {es-pull}102726[#102726] +* Reuse number field mapper tests in other modules {es-pull}99142[#99142] (issue: {es-issue}92947[#92947]) +* S3 first byte latency metric {es-pull}102435[#102435] +* Update s3 latency metric to use micros {es-pull}103633[#103633] +* Upgrade to Lucene 9.10.0 {es-pull}105578[#105578] + +Security:: +* Add Query Users API {es-pull}104033[#104033] +* Add `ApiKey` expiration time to audit log {es-pull}103959[#103959] +* Add expiration time to update api key api {es-pull}103453[#103453] +* Add stricter validation for api key expiration time {es-pull}103973[#103973] +* Add support for the `simple_query_string` to the Query API Key API {es-pull}104132[#104132] +* Add support for the `type` parameter, for sorting, to the Query API Key API {es-pull}104625[#104625] +* Aggs support for Query API Key Information API {es-pull}104895[#104895] +* Hot-reloadable remote cluster credentials {es-pull}102798[#102798] + +Snapshot/Restore:: +* Add s3 `HeadObject` request to request stats {es-pull}105105[#105105] +* Expose `OperationPurpose` via `CustomQueryParameter` to s3 logs {es-pull}105044[#105044] +* Fix blob cache race, decay, time dependency {es-pull}104784[#104784] +* Pause shard snapshots on graceful shutdown {es-pull}101717[#101717] +* Retry indefinitely for s3 indices blob read errors {es-pull}103300[#103300] + +Store:: +* List hidden shard stores by default {es-pull}103710[#103710] + +TLS:: +* 'elasticsearch-certutil cert' now verifies the issuing chain of the generated certificate {es-pull}103948[#103948] + +TSDB:: +* Improve storage efficiency for non-metric fields in TSDB {es-pull}99747[#99747] +* Introduce experimental pass-through field type {es-pull}103648[#103648] +* Nest pass-through objects within objects {es-pull}105062[#105062] +* Restrict usage of certain aggregations when in sort order execution is required {es-pull}104665[#104665] +* Small time series agg improvement {es-pull}106288[#106288] + +Transform:: +* Allow transforms to use PIT with remote
clusters again {es-pull}105192[#105192] (issue: {es-issue}104518[#104518]) +* Transforms: Adding basic stats API param {es-pull}104878[#104878] + +Vector Search:: +* Add new int8_flat and flat vector index types {es-pull}104872[#104872] +* Add support for more than one `inner_hit` when searching nested vectors {es-pull}104006[#104006] +* Making `k` and `num_candidates` optional for knn search {es-pull}101209[#101209] (issue: {es-issue}97533[#97533]) + +[[feature-8.13.0]] +[float] +=== New features + +Data streams:: +* Add `require_data_stream` parameter to indexing requests to enforce indexing operations target a data stream {es-pull}101872[#101872] (issue: {es-issue}97032[#97032]) +* Redirect failed ingest node operations to a failure store when available {es-pull}103481[#103481] + +ES|QL:: +* ESQL: Introduce mode setting for ENRICH {es-pull}103949[#103949] +* ESQL: add =~ operator (case insensitive equality) {es-pull}103656[#103656] + +Health:: +* Create a DSL health indicator as part of the health API {es-pull}103130[#103130] + +Infra/Core:: +* Add gradle tasks and code to modify and access mappings between version ids and release versions {es-pull}103627[#103627] + +Mapping:: +* Add `unmatch_mapping_type`, and support array of types {es-pull}103171[#103171] (issues: {es-issue}102807[#102807], {es-issue}102795[#102795]) + +Search:: +* Added Duplicate Word Check Feature to Analysis Nori {es-pull}103325[#103325] (issue: {es-issue}103321[#103321]) +* [Synonyms] Mark Synonyms as GA {es-pull}103223[#103223] + +[[upgrade-8.13.0]] +[float] +=== Upgrades + +Query Languages:: +* Upgrade ANTLR4 to 4.13.1 {es-pull}105334[#105334] (issue: {es-issue}102953[#102953]) + +Search:: +* Upgrade to Lucene 9.9.0 {es-pull}102782[#102782] +* Upgrade to Lucene 9.9.1 {es-pull}103387[#103387] +* Upgrade to Lucene 9.9.2 {es-pull}104753[#104753] + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 92cd447a48deb..25096779521e4 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -62,6 +62,16 @@ fields that don't have a value. This can be done through the newly added {es-pull}103651[#103651] +[discrete] +[[new_lucene_9_10_release]] +=== New Lucene 9.10 release +- https://github.com/apache/lucene/pull/13090: Prevent humongous allocations in ScalarQuantizer when building quantiles. +- https://github.com/apache/lucene/pull/12962: Speedup concurrent multi-segment HNSW graph search +- https://github.com/apache/lucene/pull/13033: Range queries on numeric/date/ip fields now exit earlier on segments whose values don't intersect with the query range. This should especially help when there are other required clauses in the `bool` query and when the range filter is narrow, e.g. filtering on the last 5 minutes. +- https://github.com/apache/lucene/pull/13026: `bool` queries that mix `filter` and `should` clauses will now propagate minimum competitive scores through the `should` clauses. This should yield speedups when sorting by descending score. 
+ +{es-pull}105578[#105578] + // end::notable-highlights[] diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index b9621977ff3aa..f22267357104e 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -108,3 +108,83 @@ teardown: indices.delete: index: .fs-logs-foobar-* - is_true: acknowledged + +--- +"Redirect shard failure in data stream to failure store": + - skip: + version: " - 8.13.99" + reason: "data stream failure stores only redirect shard failures in 8.14+" + features: [allowed_warnings, contains] + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + mappings: + properties: + '@timestamp': + type: date + count: + type: long + + + - do: + index: + index: logs-foobar + refresh: true + body: + '@timestamp': '2020-12-12' + count: 'invalid value' + + - do: + indices.get_data_stream: + name: logs-foobar + - match: { data_streams.0.name: logs-foobar } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { data_streams.0.failure_store: true } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + search: + index: logs-foobar + body: { query: { match_all: {} } } + - length: { hits.hits: 0 } + + - do: + search: + index: .fs-logs-foobar-* + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: "/\\.fs-logs-foobar-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000001/" } + - exists: hits.hits.0._source.@timestamp + - not_exists: hits.hits.0._source.count + - match: { hits.hits.0._source.document.index: 'logs-foobar' } + - match: { hits.hits.0._source.document.source.@timestamp: '2020-12-12' } + - match: { hits.hits.0._source.document.source.count: 'invalid value' } + - match: { hits.hits.0._source.error.type: 'document_parsing_exception' } + - contains: { hits.hits.0._source.error.message: "failed to parse field [count] of type [long] in document with id " } + - contains: { hits.hits.0._source.error.message: "Preview of field's value: 'invalid value'" } + - contains: { hits.hits.0._source.error.stack_trace: "org.elasticsearch.index.mapper.DocumentParsingException: " } + - contains: { hits.hits.0._source.error.stack_trace: "failed to parse field [count] of type [long] in document with id" } + - contains: { hits.hits.0._source.error.stack_trace: "Preview of field's value: 'invalid value'" } + + - do: + indices.delete_data_stream: + name: logs-foobar + - is_true: acknowledged + + - do: + indices.delete: + index: .fs-logs-foobar-* + - is_true: acknowledged diff --git a/plugins/repository-hdfs/build.gradle 
b/plugins/repository-hdfs/build.gradle index beaf8723df4d5..49fc88a15f7d3 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -7,6 +7,7 @@ */ import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.internal-yaml-rest-test' @@ -81,6 +82,7 @@ tasks.named("dependencyLicenses").configure { tasks.withType(RestIntegTestTask).configureEach { usesDefaultDistribution() + BuildParams.withFipsEnabledOnly(it) jvmArgs '--add-exports', 'java.security.jgss/sun.security.krb5=ALL-UNNAMED' } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index 049b9670b6b46..57ad446eaf637 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -278,6 +278,7 @@ synthetic source text field: type: keyword name: type: text + store: false value: type: long time_series_metric: gauge diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 282e29866a699..64f04d46a9a90 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -78,7 +79,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; -import java.util.function.Predicate; import java.util.stream.IntStream; import static java.util.Collections.singletonList; @@ -809,8 +809,23 @@ public Map getMetadataMappers() { } @Override - public Function> getFieldFilter() { - return index -> field -> field.equals("playlist") == false; + public Function getFieldFilter() { + return index -> new FieldPredicate() { + @Override + public boolean test(String field) { + return field.equals("playlist") == false; + } + + @Override + public String modifyHash(String hash) { + return "not-playlist:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 241af6e7b6c45..3a4958e046a82 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -119,6 +119,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_17 = new Version(7_17_17_99); public static final Version V_7_17_18 = new Version(7_17_18_99); public static final Version V_7_17_19 = new Version(7_17_19_99); + public static final Version V_7_17_20 = new Version(7_17_20_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = 
new Version(8_00_01_99); @@ -166,8 +167,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version V_8_12_1 = new Version(8_12_01_99); public static final Version V_8_12_2 = new Version(8_12_02_99); - public static final Version V_8_12_3 = new Version(8_12_03_99); public static final Version V_8_13_0 = new Version(8_13_00_99); + public static final Version V_8_13_1 = new Version(8_13_01_99); public static final Version V_8_14_0 = new Version(8_14_00_99); public static final Version CURRENT = V_8_14_0; diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index fdef41acb16da..685fc032431c3 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -12,13 +12,11 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -26,7 +24,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.net.URLEncoder; @@ -34,7 +31,6 @@ import java.util.Locale; import java.util.Objects; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -43,14 +39,14 @@ */ public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, ToXContentObject { - private static final String _SHARDS = "_shards"; - private static final String _INDEX = "_index"; - private static final String _ID = "_id"; - private static final String _VERSION = "_version"; - private static final String _SEQ_NO = "_seq_no"; - private static final String _PRIMARY_TERM = "_primary_term"; - private static final String RESULT = "result"; - private static final String FORCED_REFRESH = "forced_refresh"; + public static final String _SHARDS = "_shards"; + public static final String _INDEX = "_index"; + public static final String _ID = "_id"; + public static final String _VERSION = "_version"; + public static final String _SEQ_NO = "_seq_no"; + public static final String _PRIMARY_TERM = "_primary_term"; + public static final String RESULT = "result"; + public static final String FORCED_REFRESH = "forced_refresh"; /** * An enum that represents the results of CRUD operations, primarily used to communicate the type of @@ -302,54 +298,6 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t return builder; } - /** - * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method. 
- * - * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning - * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly - * if needed and then immediately returns. - */ - public static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException { - XContentParser.Token token = parser.currentToken(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); - - String currentFieldName = parser.currentName(); - token = parser.nextToken(); - - if (token.isValue()) { - if (_INDEX.equals(currentFieldName)) { - // index uuid and shard id are unknown and can't be parsed back for now. - context.setShardId(new ShardId(new Index(parser.text(), IndexMetadata.INDEX_UUID_NA_VALUE), -1)); - } else if (_ID.equals(currentFieldName)) { - context.setId(parser.text()); - } else if (_VERSION.equals(currentFieldName)) { - context.setVersion(parser.longValue()); - } else if (RESULT.equals(currentFieldName)) { - String result = parser.text(); - for (Result r : Result.values()) { - if (r.getLowercase().equals(result)) { - context.setResult(r); - break; - } - } - } else if (FORCED_REFRESH.equals(currentFieldName)) { - context.setForcedRefresh(parser.booleanValue()); - } else if (_SEQ_NO.equals(currentFieldName)) { - context.setSeqNo(parser.longValue()); - } else if (_PRIMARY_TERM.equals(currentFieldName)) { - context.setPrimaryTerm(parser.longValue()); - } - } else if (token == XContentParser.Token.START_OBJECT) { - if (_SHARDS.equals(currentFieldName)) { - context.setShardInfo(ShardInfo.fromXContent(parser)); - } else { - parser.skipChildren(); // skip potential inner objects for forward compatibility - } - } else if (token == XContentParser.Token.START_ARRAY) { - parser.skipChildren(); // skip potential inner arrays for forward compatibility - } - } - /** * Base class of all {@link DocWriteResponse} builders. 
These {@link DocWriteResponse.Builder} are used during * xcontent parsing to temporarily store the parsed values, then the {@link Builder#build()} method is called to diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java index 5d66baf0216ad..4a98ff62f6293 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatus.java @@ -8,26 +8,17 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.xcontent.XContentParserUtils; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Objects; -import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; /** * Represents snapshot status of all shards in the index @@ -118,45 +109,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - static final ObjectParser.NamedObjectParser PARSER; - static { - ConstructingObjectParser innerParser = new ConstructingObjectParser<>( - "snapshot_index_status", - true, - (Object[] parsedObjects, String index) -> { - int i = 0; - SnapshotShardsStats shardsStats = ((SnapshotShardsStats) parsedObjects[i++]); - SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]); - @SuppressWarnings("unchecked") - List shardStatuses = (List) parsedObjects[i]; - - final Map indexShards; - if (shardStatuses == null || shardStatuses.isEmpty()) { - indexShards = emptyMap(); - } else { - indexShards = Maps.newMapWithExpectedSize(shardStatuses.size()); - for (SnapshotIndexShardStatus shardStatus : shardStatuses) { - indexShards.put(shardStatus.getShardId().getId(), shardStatus); - } - } - return new SnapshotIndexStatus(index, indexShards, shardsStats, stats); - } - ); - innerParser.declareObject( - constructorArg(), - (p, c) -> SnapshotShardsStats.PARSER.apply(p, null), - new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS) - ); - innerParser.declareObject(constructorArg(), (p, c) -> SnapshotStats.fromXContent(p), new ParseField(SnapshotStats.Fields.STATS)); - innerParser.declareNamedObjects(constructorArg(), SnapshotIndexShardStatus.PARSER, new ParseField(Fields.SHARDS)); - PARSER = ((p, c, name) -> innerParser.apply(p, name)); - } - - public static SnapshotIndexStatus fromXContent(XContentParser parser) throws IOException { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); - return PARSER.parse(parser, null, parser.currentName()); - } - @Override public boolean equals(Object o) { if (this == o) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index 5bbc5368505db..28806b0aca87e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -8,18 +8,13 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collection; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - /** * Status of a snapshot shards */ @@ -129,33 +124,6 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par return builder; } - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - Fields.SHARDS_STATS, - true, - (Object[] parsedObjects) -> { - int i = 0; - int initializingShards = (int) parsedObjects[i++]; - int startedShards = (int) parsedObjects[i++]; - int finalizingShards = (int) parsedObjects[i++]; - int doneShards = (int) parsedObjects[i++]; - int failedShards = (int) parsedObjects[i++]; - int totalShards = (int) parsedObjects[i]; - return new SnapshotShardsStats(initializingShards, startedShards, finalizingShards, doneShards, failedShards, totalShards); - } - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(Fields.INITIALIZING)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.STARTED)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.FINALIZING)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.DONE)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.FAILED)); - PARSER.declareInt(constructorArg(), new ParseField(Fields.TOTAL)); - } - - public static SnapshotShardsStats fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 956ce57d168e0..e228ad18641fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; @@ -19,12 +18,7 @@ import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.snapshots.Snapshot; -import org.elasticsearch.snapshots.SnapshotId; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -34,11 +28,7 
@@ import java.util.Map; import java.util.Objects; -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Status of a snapshot @@ -87,7 +77,7 @@ public class SnapshotStatus implements ChunkedToXContentObject, Writeable { updateShardStats(startTime, time); } - private SnapshotStatus( + SnapshotStatus( Snapshot snapshot, State state, List shards, @@ -182,12 +172,12 @@ public SnapshotStats getStats() { return stats; } - private static final String SNAPSHOT = "snapshot"; - private static final String REPOSITORY = "repository"; - private static final String UUID = "uuid"; - private static final String STATE = "state"; - private static final String INDICES = "indices"; - private static final String INCLUDE_GLOBAL_STATE = "include_global_state"; + static final String SNAPSHOT = "snapshot"; + static final String REPOSITORY = "repository"; + static final String UUID = "uuid"; + static final String STATE = "state"; + static final String INDICES = "indices"; + static final String INCLUDE_GLOBAL_STATE = "include_global_state"; @Override public Iterator toXContentChunked(ToXContent.Params params) { @@ -206,59 +196,6 @@ public Iterator toXContentChunked(ToXContent.Params params }), getIndices().values().iterator(), Iterators.single((b, p) -> b.endObject().endObject())); } - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "snapshot_status", - true, - (Object[] parsedObjects) -> { - int i = 0; - String name = (String) parsedObjects[i++]; - String repository = (String) parsedObjects[i++]; - String uuid = (String) parsedObjects[i++]; - String rawState = (String) parsedObjects[i++]; - Boolean includeGlobalState = (Boolean) parsedObjects[i++]; - SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]); - SnapshotShardsStats shardsStats = ((SnapshotShardsStats) parsedObjects[i++]); - @SuppressWarnings("unchecked") - List indices = ((List) parsedObjects[i]); - - Snapshot snapshot = new Snapshot(repository, new SnapshotId(name, uuid)); - SnapshotsInProgress.State state = SnapshotsInProgress.State.valueOf(rawState); - Map indicesStatus; - List shards; - if (indices == null || indices.isEmpty()) { - indicesStatus = emptyMap(); - shards = emptyList(); - } else { - indicesStatus = Maps.newMapWithExpectedSize(indices.size()); - shards = new ArrayList<>(); - for (SnapshotIndexStatus index : indices) { - indicesStatus.put(index.getIndex(), index); - shards.addAll(index.getShards().values()); - } - } - return new SnapshotStatus(snapshot, state, shards, indicesStatus, shardsStats, stats, includeGlobalState); - } - ); - static { - PARSER.declareString(constructorArg(), new ParseField(SNAPSHOT)); - PARSER.declareString(constructorArg(), new ParseField(REPOSITORY)); - PARSER.declareString(constructorArg(), new ParseField(UUID)); - PARSER.declareString(constructorArg(), new ParseField(STATE)); - PARSER.declareBoolean(optionalConstructorArg(), new ParseField(INCLUDE_GLOBAL_STATE)); - PARSER.declareField( - constructorArg(), - SnapshotStats::fromXContent, - new ParseField(SnapshotStats.Fields.STATS), - ObjectParser.ValueType.OBJECT - ); - PARSER.declareObject(constructorArg(), SnapshotShardsStats.PARSER, new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS)); - PARSER.declareNamedObjects(constructorArg(), 
SnapshotIndexStatus.PARSER, new ParseField(INDICES)); - } - - public static SnapshotStatus fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - private void updateShardStats(long startTime, long time) { stats = new SnapshotStats(startTime, time, 0, 0, 0, 0, 0, 0); shardsStats = new SnapshotShardsStats(shards); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 1d95f430d5c7e..1e9b1446850af 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -16,18 +16,21 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.RoutingMissingException; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; @@ -39,11 +42,16 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.LongSupplier; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.EXCLUDED_DATA_STREAMS_KEY; @@ -59,14 +67,16 @@ final class BulkOperation extends ActionRunnable { private final Task task; private final ThreadPool threadPool; private final ClusterService clusterService; - private BulkRequest bulkRequest; // set to null once all requests are sent out + private BulkRequest bulkRequest; // set to null once all requests are completed private final ActionListener listener; private final AtomicArray responses; + private final ConcurrentLinkedQueue failureStoreRedirects = new ConcurrentLinkedQueue<>(); private final long startTimeNanos; private final ClusterStateObserver observer; private final Map indicesThatCannotBeCreated; private final String executorName; private final LongSupplier relativeTimeProvider; + private final FailureStoreDocumentConverter failureStoreDocumentConverter; private IndexNameExpressionResolver indexNameExpressionResolver; private NodeClient client; @@ -83,6 +93,40 @@ final class BulkOperation extends ActionRunnable { LongSupplier relativeTimeProvider, long startTimeNanos, ActionListener listener + ) { + this( + task, + threadPool, + executorName, + 
clusterService, + bulkRequest, + client, + responses, + indicesThatCannotBeCreated, + indexNameExpressionResolver, + relativeTimeProvider, + startTimeNanos, + listener, + new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()), + new FailureStoreDocumentConverter() + ); + } + + BulkOperation( + Task task, + ThreadPool threadPool, + String executorName, + ClusterService clusterService, + BulkRequest bulkRequest, + NodeClient client, + AtomicArray responses, + Map indicesThatCannotBeCreated, + IndexNameExpressionResolver indexNameExpressionResolver, + LongSupplier relativeTimeProvider, + long startTimeNanos, + ActionListener listener, + ClusterStateObserver observer, + FailureStoreDocumentConverter failureStoreDocumentConverter ) { super(listener); this.task = task; @@ -97,68 +141,90 @@ final class BulkOperation extends ActionRunnable { this.relativeTimeProvider = relativeTimeProvider; this.indexNameExpressionResolver = indexNameExpressionResolver; this.client = client; - this.observer = new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext()); + this.observer = observer; + this.failureStoreDocumentConverter = failureStoreDocumentConverter; } @Override protected void doRun() { assert bulkRequest != null; final ClusterState clusterState = observer.setAndGetObservedState(); - if (handleBlockExceptions(clusterState)) { + if (handleBlockExceptions(clusterState, BulkOperation.this, this::onFailure)) { + return; + } + Map> requestsByShard = groupBulkRequestsByShards(clusterState); + executeBulkRequestsByShard(requestsByShard, clusterState, this::redirectFailuresOrCompleteBulkOperation); + } + + private void doRedirectFailures() { + assert failureStoreRedirects.isEmpty() != true : "Attempting to redirect failures, but none were present in the queue"; + final ClusterState clusterState = observer.setAndGetObservedState(); + // If the cluster is blocked at this point, discard the failure store redirects and complete the response with the original failures + if (handleBlockExceptions(clusterState, ActionRunnable.run(listener, this::doRedirectFailures), this::discardRedirectsAndFinish)) { return; } - Map> requestsByShard = groupRequestsByShards(clusterState); - executeBulkRequestsByShard(requestsByShard, clusterState); + Map> requestsByShard = drainAndGroupRedirectsByShards(clusterState); + executeBulkRequestsByShard(requestsByShard, clusterState, this::completeBulkOperation); } private long buildTookInMillis(long startTimeNanos) { return TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeNanos); } - private Map> groupRequestsByShards(ClusterState clusterState) { + private Map> groupBulkRequestsByShards(ClusterState clusterState) { + return groupRequestsByShards( + clusterState, + Iterators.enumerate(bulkRequest.requests.iterator(), BulkItemRequest::new), + BulkOperation::validateWriteIndex + ); + } + + private Map> drainAndGroupRedirectsByShards(ClusterState clusterState) { + return groupRequestsByShards( + clusterState, + Iterators.fromSupplier(failureStoreRedirects::poll), + (ia, ignore) -> validateRedirectIndex(ia) + ); + } + + private Map> groupRequestsByShards( + ClusterState clusterState, + Iterator it, + BiConsumer> indexOperationValidator + ) { final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver); Metadata metadata = clusterState.metadata(); // Group the requests by ShardId -> Operations mapping Map> requestsByShard = new 
HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest docWriteRequest = bulkRequest.requests.get(i); + while (it.hasNext()) { + BulkItemRequest bulkItemRequest = it.next(); + DocWriteRequest docWriteRequest = bulkItemRequest.request(); + // the request can only be null because we set it to null in the previous step, so it gets ignored if (docWriteRequest == null) { continue; } - if (addFailureIfRequiresAliasAndAliasIsMissing(docWriteRequest, i, metadata)) { + if (addFailureIfRequiresAliasAndAliasIsMissing(docWriteRequest, bulkItemRequest.id(), metadata)) { continue; } - if (addFailureIfIndexCannotBeCreated(docWriteRequest, i)) { + if (addFailureIfIndexCannotBeCreated(docWriteRequest, bulkItemRequest.id())) { continue; } - if (addFailureIfRequiresDataStreamAndNoParentDataStream(docWriteRequest, i, metadata)) { + if (addFailureIfRequiresDataStreamAndNoParentDataStream(docWriteRequest, bulkItemRequest.id(), metadata)) { continue; } IndexAbstraction ia = null; - boolean includeDataStreams = docWriteRequest.opType() == DocWriteRequest.OpType.CREATE; try { ia = concreteIndices.resolveIfAbsent(docWriteRequest); - if (ia.isDataStreamRelated() && includeDataStreams == false) { - throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); - } - // The ConcreteIndices#resolveIfAbsent(...) method validates via IndexNameExpressionResolver whether - // an operation is allowed in index into a data stream, but this isn't done when resolve call is cached, so - // the validation needs to be performed here too. - if (ia.getParentDataStream() != null && - // avoid valid cases when directly indexing into a backing index - // (for example when directly indexing into .ds-logs-foobar-000001) - ia.getName().equals(docWriteRequest.index()) == false && docWriteRequest.opType() != DocWriteRequest.OpType.CREATE) { - throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); - } + indexOperationValidator.accept(ia, docWriteRequest); TransportBulkAction.prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); TransportBulkAction.prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); final Index concreteIndex = docWriteRequest.getConcreteWriteIndex(ia, metadata); - if (addFailureIfIndexIsClosed(docWriteRequest, concreteIndex, i, metadata)) { + if (addFailureIfIndexIsClosed(docWriteRequest, concreteIndex, bulkItemRequest.id(), metadata)) { continue; } IndexRouting indexRouting = concreteIndices.routing(concreteIndex); @@ -168,37 +234,56 @@ private Map> groupRequestsByShards(ClusterState c new ShardId(concreteIndex, shardId), shard -> new ArrayList<>() ); - shardRequests.add(new BulkItemRequest(i, docWriteRequest)); + shardRequests.add(bulkItemRequest); } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException | ResourceNotFoundException e) { String name = ia != null ? 
ia.getName() : docWriteRequest.index(); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(name, docWriteRequest.id(), e); - BulkItemResponse bulkItemResponse = BulkItemResponse.failure(i, docWriteRequest.opType(), failure); - responses.set(i, bulkItemResponse); - // make sure the request gets never processed again - bulkRequest.requests.set(i, null); + addFailureAndDiscardRequest(docWriteRequest, bulkItemRequest.id(), name, e); } } return requestsByShard; } - private void executeBulkRequestsByShard(Map<ShardId, List<BulkItemRequest>> requestsByShard, ClusterState clusterState) { + /** + * Validates that an index abstraction is capable of receiving the provided write request + */ + private static void validateWriteIndex(IndexAbstraction ia, DocWriteRequest<?> docWriteRequest) { + boolean includeDataStreams = docWriteRequest.opType() == DocWriteRequest.OpType.CREATE; + if (ia.isDataStreamRelated() && includeDataStreams == false) { + throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); + } + // The ConcreteIndices#resolveIfAbsent(...) method validates via IndexNameExpressionResolver whether + // an operation is allowed to index into a data stream, but this isn't done when the resolve call is cached, so + // the validation needs to be performed here too. + if (ia.getParentDataStream() != null && + // avoid valid cases when directly indexing into a backing index + // (for example when directly indexing into .ds-logs-foobar-000001) + ia.getName().equals(docWriteRequest.index()) == false && docWriteRequest.opType() != DocWriteRequest.OpType.CREATE) { + throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); + } + } + + /** + * Validates that an index abstraction is capable of receiving a failure store redirect + */ + private static void validateRedirectIndex(IndexAbstraction ia) { + if (ia.isDataStreamRelated() == false) { + // We should only be dealing with traffic targeting concrete data streams.
+ throw new IllegalArgumentException("only write ops to data streams with enabled failure stores can be redirected on failure."); + } + } + + private void executeBulkRequestsByShard( + Map<ShardId, List<BulkItemRequest>> requestsByShard, + ClusterState clusterState, + Runnable onRequestsCompleted + ) { if (requestsByShard.isEmpty()) { - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); + onRequestsCompleted.run(); return; } String nodeId = clusterService.localNode().getId(); - Runnable onBulkItemsComplete = () -> { - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); - // Allow memory for bulk shard request items to be reclaimed before all items have been completed - bulkRequest = null; - }; - - try (RefCountingRunnable bulkItemRequestCompleteRefCount = new RefCountingRunnable(onBulkItemsComplete)) { + try (RefCountingRunnable bulkItemRequestCompleteRefCount = new RefCountingRunnable(onRequestsCompleted)) { for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List<BulkItemRequest> requests = entry.getValue(); @@ -219,18 +304,75 @@ private void executeBulkRequestsByShard(Map<ShardId, List<BulkItemRequest>> requ } } + private void redirectFailuresOrCompleteBulkOperation() { + if (DataStream.isFailureStoreEnabled() && failureStoreRedirects.isEmpty() == false) { + doRedirectFailures(); + } else { + completeBulkOperation(); + } + } + + private void completeBulkOperation() { + listener.onResponse( + new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) + ); + // Allow memory for bulk shard request items to be reclaimed before all items have been completed + bulkRequest = null; + } + + /** + * Discards all failure store redirections and completes the bulk request. + * @param exception any documents that could have been redirected will have this exception added as a suppressed exception + * on their original failure information.
+ */ + private void discardRedirectsAndFinish(Exception exception) { + assert failureStoreRedirects.isEmpty() != true : "Attempting to discard redirects, but there were none to discard"; + Iterator<BulkItemRequest> redirectedBulkItemIterator = Iterators.fromSupplier(failureStoreRedirects::poll); + while (redirectedBulkItemIterator.hasNext()) { + BulkItemRequest cancelledRedirectBulkItem = redirectedBulkItemIterator.next(); + int slot = cancelledRedirectBulkItem.id(); + BulkItemResponse originalFailure = responses.get(slot); + if (originalFailure.isFailed()) { + originalFailure.getFailure().getCause().addSuppressed(exception); + } + } + completeBulkOperation(); + } + private void executeBulkShardRequest(BulkShardRequest bulkShardRequest, Releasable releaseOnFinish) { client.executeLocally(TransportShardBulkAction.TYPE, bulkShardRequest, new ActionListener<>() { + + // Lazily get the cluster state to avoid keeping it around longer than it is needed + private ClusterState clusterState = null; + + private ClusterState getClusterState() { + if (clusterState == null) { + clusterState = clusterService.state(); + } + return clusterState; + } + @Override public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { + for (int idx = 0; idx < bulkShardResponse.getResponses().length; idx++) { + // We zip the requests and responses together so that we can identify failed documents and potentially store them + BulkItemResponse bulkItemResponse = bulkShardResponse.getResponses()[idx]; + + if (bulkItemResponse.isFailed()) { + BulkItemRequest bulkItemRequest = bulkShardRequest.items()[idx]; + assert bulkItemRequest.id() == bulkItemResponse.getItemId() : "Bulk items were returned out of order"; + + String failureStoreReference = getRedirectTarget(bulkItemRequest.request(), getClusterState().metadata()); + if (failureStoreReference != null) { + addDocumentToRedirectRequests(bulkItemRequest, bulkItemResponse.getFailure().getCause(), failureStoreReference); + } + addFailure(bulkItemResponse); + } else { bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); } - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); } - releaseOnFinish.close(); + completeShardOperation(); } @Override @@ -239,33 +381,135 @@ public void onFailure(Exception e) { for (BulkItemRequest request : bulkShardRequest.items()) { final String indexName = request.index(); DocWriteRequest<?> docWriteRequest = request.request(); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e); - responses.set(request.id(), BulkItemResponse.failure(request.id(), docWriteRequest.opType(), failure)); + + String failureStoreReference = getRedirectTarget(docWriteRequest, getClusterState().metadata()); + if (failureStoreReference != null) { + addDocumentToRedirectRequests(request, e, failureStoreReference); + } + addFailure(docWriteRequest, request.id(), indexName, e); } + completeShardOperation(); + } + + private void completeShardOperation() { + // Clear our handle on the cluster state to allow it to be cleaned up + clusterState = null; + releaseOnFinish.close(); } }); } - private boolean handleBlockExceptions(ClusterState state) { + /** + * Determines if the write request can be redirected if it fails.
Write requests can be redirected IFF they are targeting a data stream + * with a failure store and are not already redirected themselves. If the document can be redirected, the data stream name to use for + * the redirection is returned. + * + * @param docWriteRequest the write request to check + * @param metadata cluster state metadata for resolving index abstractions + * @return a data stream name if the write request points to a data stream that has the failure store enabled, + * or {@code null} if it does not + */ + private static String getRedirectTarget(DocWriteRequest<?> docWriteRequest, Metadata metadata) { + // Feature flag guard + if (DataStream.isFailureStoreEnabled() == false) { + return null; + } + // Do not resolve a failure store for documents that were already headed to one + if (docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore()) { + return null; + } + // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support + IndexAbstraction ia = metadata.getIndicesLookup().get(docWriteRequest.index()); + if (ia == null) { + return null; + } + if (ia.isDataStreamRelated()) { + // The index abstraction could be an alias. Alias abstractions (even for data streams) only keep track of which _index_ they + // will write to, not which _data stream_. + // We work backward to find the data stream from the concrete write index to cover this case. + Index concreteIndex = ia.getWriteIndex(); + IndexAbstraction writeIndexAbstraction = metadata.getIndicesLookup().get(concreteIndex.getName()); + DataStream parentDataStream = writeIndexAbstraction.getParentDataStream(); + if (parentDataStream != null && parentDataStream.isFailureStore()) { + // Keep the data stream name around to resolve the redirect to failure store if the shard level request fails. + return parentDataStream.getName(); + } + } + return null; + } + + /** + * Marks a failed bulk item for redirection. At the end of the first round of shard requests, any documents in the + * redirect list are processed to their final destinations. + * + * @param request The bulk item request that failed + * @param cause The exception that caused the failure + * @param failureStoreReference The data stream that contains the failure store for this item + */ + private void addDocumentToRedirectRequests(BulkItemRequest request, Exception cause, String failureStoreReference) { + // Convert the document into a failure document + IndexRequest failureStoreRequest; + try { + failureStoreRequest = failureStoreDocumentConverter.transformFailedRequest( + TransportBulkAction.getIndexWriteRequest(request.request()), + cause, + failureStoreReference, + threadPool::absoluteTimeInMillis + ); + } catch (IOException ioException) { + logger.debug( + () -> "Could not transform failed bulk request item into failure store document. Attempted for [" + + request.request().opType() + + ": index=" + + request.index() + + "; id=" + + request.request().id() + + "; bulk_slot=" + + request.id() + + "] Proceeding with failing the original.", + ioException + ); + // Suppress and do not redirect + cause.addSuppressed(ioException); + return; + } + + // Store for second phase + BulkItemRequest redirected = new BulkItemRequest(request.id(), failureStoreRequest); + failureStoreRedirects.add(redirected); + } + + /** + * Examine the cluster state for blocks before continuing. If any block exists in the cluster state, this function will return + * {@code true}.
If the block is retryable, the {@code retryOperation} runnable will be called asynchronously if the cluster ever + * becomes unblocked. If a non-retryable block exists, or if we encounter a timeout before the blocks could be cleared, the + * {@code onClusterBlocked} consumer will be invoked with the cluster block exception. + * + * @param state The current state to check for blocks + * @param retryOperation If retryable blocks exist, the runnable to execute after they have cleared. + * @param onClusterBlocked Consumes the block exception if the cluster has a non-retryable block or if we encounter a timeout while + * waiting for a block to clear. + * @return {@code true} if the cluster is currently blocked at all, {@code false} if the cluster has no blocks. + */ + private boolean handleBlockExceptions(ClusterState state, Runnable retryOperation, Consumer<Exception> onClusterBlocked) { ClusterBlockException blockException = state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); if (blockException != null) { if (blockException.retryable()) { logger.trace("cluster is blocked, scheduling a retry", blockException); - retry(blockException); + retry(blockException, retryOperation, onClusterBlocked); } else { - onFailure(blockException); + onClusterBlocked.accept(blockException); } return true; } return false; } - void retry(Exception failure) { + void retry(Exception failure, final Runnable operation, final Consumer<Exception> onClusterBlocked) { assert failure != null; if (observer.isTimedOut()) { - // we running as a last attempt after a timeout has happened. don't retry - onFailure(failure); + // we are running as a last attempt after a timeout has happened. don't retry + onClusterBlocked.accept(failure); return; } observer.waitForNextChange(new ClusterStateObserver.Listener() { @@ -282,6 +526,8 @@ public void onNewClusterState(ClusterState state) { @Override public void onClusterServiceClose() { + // There is very little we can do about this, and our time in this JVM is likely short. + // Let's just try to get out of here ASAP.
onFailure(new NodeClosedException(clusterService.localNode())); } @@ -297,7 +543,7 @@ public void onTimeout(TimeValue timeout) { } private void dispatchRetry() { - threadPool.executor(executorName).submit(BulkOperation.this); + threadPool.executor(executorName).submit(operation); } }); } @@ -308,7 +554,7 @@ private boolean addFailureIfRequiresAliasAndAliasIsMissing(DocWriteRequest<?> re "[" + DocWriteRequest.REQUIRE_ALIAS + "] request flag is [true] and [" + request.index() + "] is not an alias", request.index() ); - addFailure(request, idx, exception); + addFailureAndDiscardRequest(request, idx, request.index(), exception); return true; } return false; @@ -320,7 +566,7 @@ private boolean addFailureIfRequiresDataStreamAndNoParentDataStream(DocWriteRequ "[" + DocWriteRequest.REQUIRE_DATA_STREAM + "] request flag is [true] and [" + request.index() + "] is not a data stream", request.index() ); - addFailure(request, idx, exception); + addFailureAndDiscardRequest(request, idx, request.index(), exception); return true; } return false; @@ -329,7 +575,7 @@ private boolean addFailureIfRequiresDataStreamAndNoParentDataStream(DocWriteRequ private boolean addFailureIfIndexIsClosed(DocWriteRequest<?> request, Index concreteIndex, int idx, final Metadata metadata) { IndexMetadata indexMetadata = metadata.getIndexSafe(concreteIndex); if (indexMetadata.getState() == IndexMetadata.State.CLOSE) { - addFailure(request, idx, new IndexClosedException(concreteIndex)); + addFailureAndDiscardRequest(request, idx, request.index(), new IndexClosedException(concreteIndex)); return true; } return false; @@ -338,20 +584,73 @@ private boolean addFailureIfIndexIsClosed(DocWriteRequest<?> request, Index conc private boolean addFailureIfIndexCannotBeCreated(DocWriteRequest<?> request, int idx) { IndexNotFoundException cannotCreate = indicesThatCannotBeCreated.get(request.index()); if (cannotCreate != null) { - addFailure(request, idx, cannotCreate); + addFailureAndDiscardRequest(request, idx, request.index(), cannotCreate); return true; } return false; } - private void addFailure(DocWriteRequest<?> request, int idx, Exception unavailableException) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.id(), unavailableException); - BulkItemResponse bulkItemResponse = BulkItemResponse.failure(idx, request.opType(), failure); - responses.set(idx, bulkItemResponse); + /** + * Like {@link BulkOperation#addFailure(DocWriteRequest, int, String, Exception)} but this method will remove the corresponding entry + * from the working bulk request so that it never gets processed again during this operation. + */ + private void addFailureAndDiscardRequest(DocWriteRequest<?> request, int idx, String index, Exception exception) { + addFailure(request, idx, index, exception); // make sure the request gets never processed again bulkRequest.requests.set(idx, null); } + /** + * Checks if a bulk item response exists for this entry. If none exists, a failure response is created and set in the response array. + * If a response exists already, the failure information provided to this call will be added to the existing failure as a suppressed + * exception.
+ * + * @param request The document write request that should be failed + * @param idx The slot of the bulk entry this request corresponds to + * @param index The resource that this entry was being written to when it failed + * @param exception The exception encountered for this entry + * @see BulkOperation#addFailure(BulkItemResponse) BulkOperation.addFailure if you have a bulk item response object already + */ + private void addFailure(DocWriteRequest request, int idx, String index, Exception exception) { + BulkItemResponse bulkItemResponse = responses.get(idx); + if (bulkItemResponse == null) { + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.id(), exception); + bulkItemResponse = BulkItemResponse.failure(idx, request.opType(), failure); + } else { + // Response already recorded. We should only be here if the existing response is a failure and + // we are encountering a new failure while redirecting. + assert bulkItemResponse.isFailed() : "Attempting to overwrite successful bulk item result with a failure"; + bulkItemResponse.getFailure().getCause().addSuppressed(exception); + } + // Always replace the item in the responses for thread visibility of any mutations + responses.set(idx, bulkItemResponse); + } + + /** + * Checks if a bulk item response exists for this entry. If none exists, the failure is set in the response array. If a response exists + * already, the failure information provided to this call will be added to the existing failure as a suppressed exception. + * + * @param bulkItemResponse the item response to add to the overall result array + * @see BulkOperation#addFailure(DocWriteRequest, int, String, Exception) BulkOperation.addFailure which conditionally creates the + * failure response only when one does not exist already + */ + private void addFailure(BulkItemResponse bulkItemResponse) { + assert bulkItemResponse.isFailed() : "Attempting to add a successful bulk item response via the addFailure method"; + BulkItemResponse existingBulkItemResponse = responses.get(bulkItemResponse.getItemId()); + if (existingBulkItemResponse != null) { + // Response already recorded. We should only be here if the existing response is a failure and + // we are encountering a new failure while redirecting. + assert existingBulkItemResponse.isFailed() : "Attempting to overwrite successful bulk item result with a failure"; + existingBulkItemResponse.getFailure().getCause().addSuppressed(bulkItemResponse.getFailure().getCause()); + bulkItemResponse = existingBulkItemResponse; + } + // Always replace the item in the responses for thread visibility of any mutations + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } + + /** + * Resolves and caches index and routing abstractions to more efficiently group write requests into shards. + */ private static class ConcreteIndices { private final ClusterState state; private final IndexNameExpressionResolver indexNameExpressionResolver; @@ -363,6 +662,13 @@ private static class ConcreteIndices { this.indexNameExpressionResolver = indexNameExpressionResolver; } + /** + * Resolves the index abstraction that the write request is targeting, potentially obtaining it from a cache. This instance isn't + * fully resolved, meaning that {@link IndexAbstraction#getWriteIndex()} should be invoked in order to get concrete write index. 
+ * + * @param request a write request + * @return the index abstraction that the write request is targeting + */ IndexAbstraction resolveIfAbsent(DocWriteRequest request) { try { IndexAbstraction indexAbstraction = indexAbstractions.get(request.index()); @@ -380,6 +686,12 @@ IndexAbstraction resolveIfAbsent(DocWriteRequest request) { } } + /** + * Determines which routing strategy to use for a document being written to the provided index, potentially obtaining the result + * from a cache. + * @param index the index to determine routing strategy for + * @return an {@link IndexRouting} object to use for assigning a write request to a shard + */ IndexRouting routing(Index index) { IndexRouting routing = routings.get(index); if (routing == null) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java index 5e630bf9cdef5..2112ad48bec62 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestModifier.java @@ -53,6 +53,7 @@ final class BulkRequestModifier implements Iterator> { final SparseFixedBitSet failedSlots; final List itemResponses; final AtomicIntegerArray originalSlots; + final FailureStoreDocumentConverter failureStoreDocumentConverter; volatile int currentSlot = -1; @@ -61,6 +62,7 @@ final class BulkRequestModifier implements Iterator> { this.failedSlots = new SparseFixedBitSet(bulkRequest.requests().size()); this.itemResponses = new ArrayList<>(bulkRequest.requests().size()); this.originalSlots = new AtomicIntegerArray(bulkRequest.requests().size()); // oversize, but that's ok + this.failureStoreDocumentConverter = new FailureStoreDocumentConverter(); } @Override @@ -243,7 +245,7 @@ public void markItemForFailureStore(int slot, String targetIndexName, Exception ); } else { try { - IndexRequest errorDocument = FailureStoreDocument.transformFailedRequest(indexRequest, e, targetIndexName); + IndexRequest errorDocument = failureStoreDocumentConverter.transformFailedRequest(indexRequest, e, targetIndexName); // This is a fresh index request! We need to do some preprocessing on it. If we do not, when this is returned to // the bulk action, the action will see that it hasn't been processed by ingest yet and attempt to ingest it again. errorDocument.isPipelineResolved(true); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocument.java b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java similarity index 94% rename from server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocument.java rename to server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java index e0d6e8200e86d..ce76f377ac94e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocument.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverter.java @@ -22,9 +22,7 @@ /** * Transforms an indexing request using error information into a new index request to be stored in a data stream's failure store. */ -public final class FailureStoreDocument { - - private FailureStoreDocument() {} +public class FailureStoreDocumentConverter { /** * Combines an {@link IndexRequest} that has failed during the bulk process with the error thrown for that request. 
The result is a @@ -35,7 +33,7 @@ private FailureStoreDocument() {} * @return A new {@link IndexRequest} with a failure store compliant structure * @throws IOException If there is a problem when the document's new source is serialized */ - public static IndexRequest transformFailedRequest(IndexRequest source, Exception exception, String targetIndexName) throws IOException { + public IndexRequest transformFailedRequest(IndexRequest source, Exception exception, String targetIndexName) throws IOException { return transformFailedRequest(source, exception, targetIndexName, System::currentTimeMillis); } @@ -49,7 +47,7 @@ public static IndexRequest transformFailedRequest(IndexRequest source, Exception * @return A new {@link IndexRequest} with a failure store compliant structure * @throws IOException If there is a problem when the document's new source is serialized */ - public static IndexRequest transformFailedRequest( + public IndexRequest transformFailedRequest( IndexRequest source, Exception exception, String targetIndexName, diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index 6028a6e21ecff..51cb05f981177 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -21,6 +21,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; @@ -123,14 +124,15 @@ private FieldCapabilitiesIndexResponse doFetch( final String shardUuid = indexService.getShard(shardId.getId()).getShardUuid(); indexMappingHash = mapping == null ? 
shardUuid : shardUuid + mapping.getSha256(); } + FieldPredicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); if (indexMappingHash != null) { + indexMappingHash = fieldPredicate.modifyHash(indexMappingHash); final Map existing = indexMappingHashToResponses.get(indexMappingHash); if (existing != null) { return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), indexMappingHash, existing, true); } } task.ensureNotCancelled(); - Predicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); final Map responseMap = retrieveFieldCaps( searchExecutionContext, fieldNameFilter, @@ -151,7 +153,7 @@ static Map retrieveFieldCaps( Predicate fieldNameFilter, String[] filters, String[] types, - Predicate indexFieldfilter, + FieldPredicate fieldPredicate, IndexShard indexShard, boolean includeEmptyFields ) { @@ -169,7 +171,7 @@ static Map retrieveFieldCaps( } MappedFieldType ft = entry.getValue(); if ((includeEmptyFields || ft.fieldHasValue(fieldInfos)) - && (indexFieldfilter.test(ft.name()) || context.isMetadataField(ft.name())) + && (fieldPredicate.test(ft.name()) || context.isMetadataField(ft.name())) && (filter == null || filter.test(ft))) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java index b69b87190f2a7..3a27d6ac58534 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java @@ -13,15 +13,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import java.io.IOException; import java.util.List; import static org.elasticsearch.action.support.DefaultShardOperationFailedException.readShardOperationFailed; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * Base class for all broadcast operation based responses. 
@@ -30,35 +26,11 @@ public class BaseBroadcastResponse extends ActionResponse { public static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0]; - private static final ParseField _SHARDS_FIELD = new ParseField("_shards"); - private static final ParseField TOTAL_FIELD = new ParseField("total"); - private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful"); - private static final ParseField FAILED_FIELD = new ParseField("failed"); - private static final ParseField FAILURES_FIELD = new ParseField("failures"); - private final int totalShards; private final int successfulShards; private final int failedShards; private final DefaultShardOperationFailedException[] shardFailures; - @SuppressWarnings("unchecked") - public static void declareBroadcastFields(ConstructingObjectParser PARSER) { - ConstructingObjectParser shardsParser = new ConstructingObjectParser<>( - "_shards", - true, - arg -> new BaseBroadcastResponse((int) arg[0], (int) arg[1], (int) arg[2], (List) arg[3]) - ); - shardsParser.declareInt(constructorArg(), TOTAL_FIELD); - shardsParser.declareInt(constructorArg(), SUCCESSFUL_FIELD); - shardsParser.declareInt(constructorArg(), FAILED_FIELD); - shardsParser.declareObjectArray( - optionalConstructorArg(), - (p, c) -> DefaultShardOperationFailedException.fromXContent(p), - FAILURES_FIELD - ); - PARSER.declareObject(constructorArg(), shardsParser, _SHARDS_FIELD); - } - public BaseBroadcastResponse(StreamInput in) throws IOException { totalShards = in.readVInt(); successfulShards = in.readVInt(); diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java index f236a9eff25a2..ad957f7a8f37f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java @@ -15,93 +15,25 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.Maps; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.HashMap; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; -import static java.util.Collections.emptyMap; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - public final class ClusterIndexHealth implements Writeable, ToXContentFragment { - private static final String STATUS = "status"; - private static final String NUMBER_OF_SHARDS = "number_of_shards"; - private static final String NUMBER_OF_REPLICAS = "number_of_replicas"; - private static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards"; - private static final String ACTIVE_SHARDS = "active_shards"; - private static final String RELOCATING_SHARDS = "relocating_shards"; - private static final String INITIALIZING_SHARDS = "initializing_shards"; - private static final String UNASSIGNED_SHARDS = "unassigned_shards"; - private static final String SHARDS = 
"shards"; - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "cluster_index_health", - true, - (parsedObjects, index) -> { - int i = 0; - int numberOfShards = (int) parsedObjects[i++]; - int numberOfReplicas = (int) parsedObjects[i++]; - int activeShards = (int) parsedObjects[i++]; - int relocatingShards = (int) parsedObjects[i++]; - int initializingShards = (int) parsedObjects[i++]; - int unassignedShards = (int) parsedObjects[i++]; - int activePrimaryShards = (int) parsedObjects[i++]; - String statusStr = (String) parsedObjects[i++]; - ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr); - @SuppressWarnings("unchecked") - List shardList = (List) parsedObjects[i]; - final Map shards; - if (shardList == null || shardList.isEmpty()) { - shards = emptyMap(); - } else { - shards = Maps.newMapWithExpectedSize(shardList.size()); - for (ClusterShardHealth shardHealth : shardList) { - shards.put(shardHealth.getShardId(), shardHealth); - } - } - return new ClusterIndexHealth( - index, - numberOfShards, - numberOfReplicas, - activeShards, - relocatingShards, - initializingShards, - unassignedShards, - activePrimaryShards, - status, - shards - ); - } - ); - - public static final ObjectParser.NamedObjectParser SHARD_PARSER = ( - XContentParser p, - String indexIgnored, - String shardId) -> ClusterShardHealth.innerFromXContent(p, Integer.valueOf(shardId)); - - static { - PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_REPLICAS)); - PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(INITIALIZING_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(UNASSIGNED_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS)); - PARSER.declareString(constructorArg(), new ParseField(STATUS)); - // Can be absent if LEVEL == 'indices' or 'cluster' - PARSER.declareNamedObjects(optionalConstructorArg(), SHARD_PARSER, new ParseField(SHARDS)); - } + static final String STATUS = "status"; + static final String NUMBER_OF_SHARDS = "number_of_shards"; + static final String NUMBER_OF_REPLICAS = "number_of_replicas"; + static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards"; + static final String ACTIVE_SHARDS = "active_shards"; + static final String RELOCATING_SHARDS = "relocating_shards"; + static final String INITIALIZING_SHARDS = "initializing_shards"; + static final String UNASSIGNED_SHARDS = "unassigned_shards"; + static final String SHARDS = "shards"; private final String index; private final int numberOfShards; @@ -279,10 +211,6 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa return builder; } - public static ClusterIndexHealth innerFromXContent(XContentParser parser, String index) { - return PARSER.apply(parser, index); - } - @Override public String toString() { return "ClusterIndexHealth{" diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java index b3aa4275f7be7..785b0db5cc807 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java @@ -17,59 +17,20 @@ import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Locale; import java.util.Objects; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - public final class ClusterShardHealth implements Writeable, ToXContentFragment { - private static final String STATUS = "status"; - private static final String ACTIVE_SHARDS = "active_shards"; - private static final String RELOCATING_SHARDS = "relocating_shards"; - private static final String INITIALIZING_SHARDS = "initializing_shards"; - private static final String UNASSIGNED_SHARDS = "unassigned_shards"; - private static final String PRIMARY_ACTIVE = "primary_active"; - - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "cluster_shard_health", - true, - (parsedObjects, shardId) -> { - int i = 0; - boolean primaryActive = (boolean) parsedObjects[i++]; - int activeShards = (int) parsedObjects[i++]; - int relocatingShards = (int) parsedObjects[i++]; - int initializingShards = (int) parsedObjects[i++]; - int unassignedShards = (int) parsedObjects[i++]; - String statusStr = (String) parsedObjects[i]; - ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr); - return new ClusterShardHealth( - shardId, - status, - activeShards, - relocatingShards, - initializingShards, - unassignedShards, - primaryActive - ); - } - ); - - static { - PARSER.declareBoolean(constructorArg(), new ParseField(PRIMARY_ACTIVE)); - PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(INITIALIZING_SHARDS)); - PARSER.declareInt(constructorArg(), new ParseField(UNASSIGNED_SHARDS)); - PARSER.declareString(constructorArg(), new ParseField(STATUS)); - } + static final String STATUS = "status"; + static final String ACTIVE_SHARDS = "active_shards"; + static final String RELOCATING_SHARDS = "relocating_shards"; + static final String INITIALIZING_SHARDS = "initializing_shards"; + static final String UNASSIGNED_SHARDS = "unassigned_shards"; + static final String PRIMARY_ACTIVE = "primary_active"; private final int shardId; private final ClusterHealthStatus status; @@ -230,20 +191,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - static ClusterShardHealth innerFromXContent(XContentParser parser, Integer shardId) { - return PARSER.apply(parser, shardId); - } - - public static ClusterShardHealth fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - XContentParser.Token token = parser.nextToken(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); - String shardIdStr = parser.currentName(); - ClusterShardHealth parsed = innerFromXContent(parser, Integer.valueOf(shardIdStr)); - ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); - return parsed; - } - @Override public String toString() { return Strings.toString(this); diff --git 
a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index b450251ff7e3f..f424861c5b7ff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.Transports; @@ -921,7 +922,7 @@ private void findAliasInfo(final String[] aliases, final String[] possibleMatche */ public Map findMappings( String[] concreteIndices, - Function> fieldFilter, + Function> fieldFilter, Runnable onNextIndex ) { assert Transports.assertNotTransportThread("decompressing mappings is too expensive for a transport thread"); @@ -974,7 +975,7 @@ private static MappingMetadata filterFields(MappingMetadata mappingMetadata, Pre if (mappingMetadata == null) { return MappingMetadata.EMPTY_MAPPINGS; } - if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) { + if (fieldPredicate == FieldPredicate.ACCEPT_ALL) { return mappingMetadata; } Map sourceAsMap = XContentHelper.convertToMap(mappingMetadata.source().compressedReference(), true).v2(); @@ -997,7 +998,7 @@ private static MappingMetadata filterFields(MappingMetadata mappingMetadata, Pre @SuppressWarnings("unchecked") private static boolean filterFields(String currentPath, Map fields, Predicate fieldPredicate) { - assert fieldPredicate != MapperPlugin.NOOP_FIELD_PREDICATE; + assert fieldPredicate != FieldPredicate.ACCEPT_ALL; Iterator> entryIterator = fields.entrySet().iterator(); while (entryIterator.hasNext()) { Map.Entry entry = entryIterator.next(); diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index ea8eadd66acaa..165280e370025 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -15,11 +15,13 @@ import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.function.BiFunction; import java.util.function.BiPredicate; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; +import java.util.function.Supplier; import java.util.function.ToIntFunction; public class Iterators { @@ -56,7 +58,7 @@ public static Iterator concat(Iterator... iterators) { for (int i = 0; i < iterators.length; i++) { if (iterators[i].hasNext()) { // explicit generic type argument needed for type inference - return new ConcatenatedIterator(iterators, i); + return new ConcatenatedIterator<>(iterators, i); } } @@ -258,6 +260,103 @@ public T next() { } } + /** + * Enumerates the elements of an iterator together with their index, using a function to combine the pair together into the final items + * produced by the iterator. + *

+ * <p>
+ * An example of its usage to enumerate a list of names together with their positional index in the list:
+ * </p>
+ * <pre>{@code
+     * Iterator<String> nameIterator = ...;
+     * Iterator<Tuple<Integer, String>> enumeratedNames = Iterators.enumerate(nameIterator, Tuple::new);
+     * enumeratedNames.forEachRemaining(tuple -> System.out.println("Index: " + tuple.v1() + ", Name: " + tuple.v2()));
+     * }</pre>
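For reference, a runnable version of the javadoc example above (class name and sample values are illustrative; Tuple is org.elasticsearch.core.Tuple):

import java.util.Iterator;
import java.util.List;

import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.core.Tuple;

public class EnumerateExample {
    public static void main(String[] args) {
        Iterator<String> names = List.of("alice", "bob").iterator();
        // Tuple::new satisfies the BiFunction<Integer, String, Tuple<Integer, String>> shape.
        Iterator<Tuple<Integer, String>> enumerated = Iterators.enumerate(names, Tuple::new);
        // Prints "Index: 0, Name: alice" then "Index: 1, Name: bob".
        enumerated.forEachRemaining(t -> System.out.println("Index: " + t.v1() + ", Name: " + t.v2()));
    }
}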
+ * + * @param input The iterator to wrap + * @param fn A function that takes the index for an entry and the entry itself, returning an item that combines them together + * @return An iterator that combines elements together with their indices in the underlying collection + * @param The object type contained in the original iterator + * @param The object type that results from combining the original entry with its index in the iterator + */ + public static Iterator enumerate(Iterator input, BiFunction fn) { + return new EnumeratingIterator<>(Objects.requireNonNull(input), Objects.requireNonNull(fn)); + } + + private static class EnumeratingIterator implements Iterator { + private final Iterator input; + private final BiFunction fn; + + private int idx = 0; + + EnumeratingIterator(Iterator input, BiFunction fn) { + this.input = input; + this.fn = fn; + } + + @Override + public boolean hasNext() { + return input.hasNext(); + } + + @Override + public U next() { + return fn.apply(idx++, input.next()); + } + + @Override + public void forEachRemaining(Consumer action) { + input.forEachRemaining(t -> action.accept(fn.apply(idx++, t))); + } + } + + /** + * Adapts a {@link Supplier} object into an iterator. The resulting iterator will return values from the delegate Supplier until the + * delegate returns a null value. Once the delegate returns null, the iterator will claim to be empty. + *

+ * <p>
+ * An example of its usage to iterate over a queue while draining it at the same time:
+ * </p>
+ * <pre>{@code
+     *     LinkedList<String> names = ...;
+     *     assert names.size() != 0;
+     *
+     *     Iterator<String> nameIterator = Iterators.fromSupplier(names::pollFirst);
+     *     nameIterator.forEachRemaining(System.out::println);
+     *     assert names.size() == 0;
+     * }</pre>
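Likewise, a runnable version of the draining example (illustrative; an ArrayDeque stands in for the LinkedList since only pollFirst is needed). Two properties of SupplierIterator below are worth noting: the first element is fetched eagerly in the constructor, and a null from the supplier ends the iteration, so the adapter cannot yield null elements.

import java.util.ArrayDeque;
import java.util.Iterator;
import java.util.List;

import org.elasticsearch.common.collect.Iterators;

public class FromSupplierExample {
    public static void main(String[] args) {
        ArrayDeque<String> names = new ArrayDeque<>(List.of("alice", "bob", "carol"));

        // pollFirst() returns null once the deque is empty, which terminates the iterator.
        Iterator<String> nameIterator = Iterators.fromSupplier(names::pollFirst);
        nameIterator.forEachRemaining(System.out::println); // prints all three names

        System.out.println(names.isEmpty()); // true: the queue was drained as a side effect
    }
}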
+ * + * @param input A {@link Supplier} that returns null when no more elements should be returned from the iterator + * @return An iterator that returns elements by calling the supplier until a null value is returned + * @param The object type returned from the supplier function + */ + public static Iterator fromSupplier(Supplier input) { + return new SupplierIterator<>(Objects.requireNonNull(input)); + } + + private static final class SupplierIterator implements Iterator { + private final Supplier fn; + private T head; + + SupplierIterator(Supplier fn) { + this.fn = fn; + this.head = fn.get(); + } + + @Override + public boolean hasNext() { + return head != null; + } + + @Override + public T next() { + if (head == null) { + throw new NoSuchElementException(); + } + T next = head; + head = fn.get(); + return next; + } + } + public static boolean equals(Iterator iterator1, Iterator iterator2, BiPredicate itemComparer) { if (iterator1 == null) { return iterator2 == null; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index 8505c561bfb1a..799042b4f3a87 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java @@ -333,7 +333,11 @@ public boolean newDynamicStringField(DocumentParserContext context, String name) ); } else { return createDynamicField( - new TextFieldMapper.Builder(name, context.indexAnalyzers()).addMultiField( + new TextFieldMapper.Builder( + name, + context.indexAnalyzers(), + context.indexSettings().getMode().isSyntheticSourceEnabled() + ).addMultiField( new KeywordFieldMapper.Builder("keyword", context.indexSettings().getIndexVersionCreated()).ignoreAbove(256) ), context diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index e029aaa657d23..fe9bdd73cfa10 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -450,13 +450,28 @@ public static class Builder { private final Map> mapperBuilders = new HashMap<>(); + private boolean hasSyntheticSourceCompatibleKeywordField; + public Builder add(FieldMapper.Builder builder) { mapperBuilders.put(builder.name(), builder::build); + + if (builder instanceof KeywordFieldMapper.Builder kwd) { + if (kwd.hasNormalizer() == false && (kwd.hasDocValues() || kwd.isStored())) { + hasSyntheticSourceCompatibleKeywordField = true; + } + } + return this; } private void add(FieldMapper mapper) { mapperBuilders.put(mapper.simpleName(), context -> mapper); + + if (mapper instanceof KeywordFieldMapper kwd) { + if (kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { + hasSyntheticSourceCompatibleKeywordField = true; + } + } } private void update(FieldMapper toMerge, MapperMergeContext context) { @@ -474,6 +489,10 @@ public boolean hasMultiFields() { return mapperBuilders.isEmpty() == false; } + public boolean hasSyntheticSourceCompatibleKeywordField() { + return hasSyntheticSourceCompatibleKeywordField; + } + public MultiFields build(Mapper.Builder mainFieldBuilder, MapperBuilderContext context) { if (mapperBuilders.isEmpty()) { return empty(); @@ -1134,6 +1153,10 @@ public static Parameter storeParam(Function initi return Parameter.boolParam("store", false, initializer, 
defaultValue); } + public static Parameter storeParam(Function initializer, Supplier defaultValue) { + return Parameter.boolParam("store", false, initializer, defaultValue); + } + public static Parameter docValuesParam(Function initializer, boolean defaultValue) { return Parameter.boolParam("doc_values", false, initializer, defaultValue); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 4024798a85370..bdf25307d3343 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -227,6 +227,10 @@ Builder normalizer(String normalizerName) { return this; } + public boolean hasNormalizer() { + return this.normalizer.get() != null; + } + Builder nullValue(String nullValue) { this.nullValue.setValue(nullValue); return this; @@ -237,6 +241,10 @@ public Builder docValues(boolean hasDocValues) { return this; } + public boolean hasDocValues() { + return this.hasDocValues.get(); + } + public Builder dimension(boolean dimension) { this.dimension.setValue(dimension); return this; @@ -247,6 +255,15 @@ public Builder indexed(boolean indexed) { return this; } + public Builder stored(boolean stored) { + this.stored.setValue(stored); + return this; + } + + public boolean isStored() { + return this.stored.get(); + } + private FieldValues scriptValues() { if (script.get() == null) { return null; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java index dcf24c9a61bbd..aa2a7ce2f3996 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperRegistry.java @@ -10,13 +10,13 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import java.util.Collections; import java.util.LinkedHashMap; import java.util.Map; import java.util.function.Function; -import java.util.function.Predicate; /** * A registry for all field mappers. @@ -29,13 +29,13 @@ public final class MapperRegistry { private final Map metadataMapperParsers7x; private final Map metadataMapperParsers6x; private final Map metadataMapperParsers5x; - private final Function> fieldFilter; + private final Function fieldFilter; public MapperRegistry( Map mapperParsers, Map runtimeFieldParsers, Map metadataMapperParsers, - Function> fieldFilter + Function fieldFilter ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.runtimeFieldParsers = runtimeFieldParsers; @@ -92,7 +92,7 @@ public Map getMetadataMapperParsers(Inde * {@link MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be returned by get mappings, * get index, get field mappings and field capabilities API. 
*/ - public Function> getFieldFilter() { + public Function getFieldFilter() { return fieldFilter; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index faa840dacc732..ef512e2bbd46b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -236,9 +236,11 @@ private static FielddataFrequencyFilter parseFrequencyFilter(String name, Mappin public static class Builder extends FieldMapper.Builder { private final IndexVersion indexCreatedVersion; + private final Parameter store; + + private final boolean isSyntheticSourceEnabledViaIndexMode; private final Parameter index = Parameter.indexParam(m -> ((TextFieldMapper) m).index, true); - private final Parameter store = Parameter.storeParam(m -> ((TextFieldMapper) m).store, false); final Parameter similarity = TextParams.similarity(m -> ((TextFieldMapper) m).similarity); @@ -283,12 +285,28 @@ public static class Builder extends FieldMapper.Builder { final TextParams.Analyzers analyzers; - public Builder(String name, IndexAnalyzers indexAnalyzers) { - this(name, IndexVersion.current(), indexAnalyzers); + public Builder(String name, IndexAnalyzers indexAnalyzers, boolean isSyntheticSourceEnabledViaIndexMode) { + this(name, IndexVersion.current(), indexAnalyzers, isSyntheticSourceEnabledViaIndexMode); } - public Builder(String name, IndexVersion indexCreatedVersion, IndexAnalyzers indexAnalyzers) { + public Builder( + String name, + IndexVersion indexCreatedVersion, + IndexAnalyzers indexAnalyzers, + boolean isSyntheticSourceEnabledViaIndexMode + ) { super(name); + + // If synthetic source is used we need to either store this field + // to recreate the source or use keyword multi-fields for that. + // So if there are no suitable multi-fields we will default to + // storing the field without requiring users to explicitly set 'store'. + // + // If 'store' parameter was explicitly provided we'll reject the request. 
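The Supplier-based storeParam overload added in FieldMapper above exists so that this default can be computed lazily: whether a synthetic-source-compatible keyword multi-field is present is only known after all multi-fields have been added to the builder. A generic sketch of that pattern (hypothetical Param class, not the real Parameter API):

import java.util.function.Supplier;

final class Param<T> {
    private T configuredValue; // non-null only when the user set the parameter explicitly
    private final Supplier<T> defaultValue;

    Param(Supplier<T> defaultValue) {
        this.defaultValue = defaultValue;
    }

    void set(T value) {
        this.configuredValue = value;
    }

    T get() {
        // The default is evaluated at read time, so it can observe state (here: the set of
        // multi-fields) that was only established after the parameter object was created.
        return configuredValue != null ? configuredValue : defaultValue.get();
    }

    public static void main(String[] args) {
        Param<Boolean> store = new Param<>(() -> true); // lazily computed default
        System.out.println(store.get()); // true (default applies)
        store.set(false);
        System.out.println(store.get()); // false (explicit value wins)
    }
}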
+ this.store = Parameter.storeParam( + m -> ((TextFieldMapper) m).store, + () -> isSyntheticSourceEnabledViaIndexMode && multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField() == false + ); this.indexCreatedVersion = indexCreatedVersion; this.analyzers = new TextParams.Analyzers( indexAnalyzers, @@ -296,6 +314,7 @@ public Builder(String name, IndexVersion indexCreatedVersion, IndexAnalyzers ind m -> (((TextFieldMapper) m).positionIncrementGap), indexCreatedVersion ); + this.isSyntheticSourceEnabledViaIndexMode = isSyntheticSourceEnabledViaIndexMode; } public Builder index(boolean index) { @@ -387,13 +406,9 @@ private static KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate(Field if (fieldType.stored()) { return null; } - for (Mapper sub : multiFields) { - if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { - KeywordFieldMapper kwd = (KeywordFieldMapper) sub; - if (kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { - return kwd.fieldType(); - } - } + var kwd = getKeywordFieldMapperForSyntheticSource(multiFields); + if (kwd != null) { + return kwd.fieldType(); } return null; } @@ -483,7 +498,7 @@ public TextFieldMapper build(MapperBuilderContext context) { private static final IndexVersion MINIMUM_COMPATIBILITY_VERSION = IndexVersion.fromId(5000099); public static final TypeParser PARSER = new TypeParser( - (n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers()), + (n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers(), c.getIndexSettings().getMode().isSyntheticSourceEnabled()), MINIMUM_COMPATIBILITY_VERSION ); @@ -1203,6 +1218,8 @@ public Query existsQuery(SearchExecutionContext context) { private final SubFieldInfo prefixFieldInfo; private final SubFieldInfo phraseFieldInfo; + private final boolean isSyntheticSourceEnabledViaIndexMode; + private TextFieldMapper( String simpleName, FieldType fieldType, @@ -1235,6 +1252,7 @@ private TextFieldMapper( this.indexPrefixes = builder.indexPrefixes.getValue(); this.freqFilter = builder.freqFilter.getValue(); this.fieldData = builder.fieldData.get(); + this.isSyntheticSourceEnabledViaIndexMode = builder.isSyntheticSourceEnabledViaIndexMode; } @Override @@ -1258,7 +1276,7 @@ public Map indexAnalyzers() { @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), indexCreatedVersion, indexAnalyzers).init(this); + return new Builder(simpleName(), indexCreatedVersion, indexAnalyzers, isSyntheticSourceEnabledViaIndexMode).init(this); } @Override @@ -1454,15 +1472,12 @@ protected void write(XContentBuilder b, Object value) throws IOException { } }; } - for (Mapper sub : this) { - if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { - KeywordFieldMapper kwd = (KeywordFieldMapper) sub; - if (kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { - return kwd.syntheticFieldLoader(simpleName()); - } - } + var kwd = getKeywordFieldMapperForSyntheticSource(this); + if (kwd != null) { + return kwd.syntheticFieldLoader(simpleName()); } + throw new IllegalArgumentException( String.format( Locale.ROOT, @@ -1473,4 +1488,17 @@ protected void write(XContentBuilder b, Object value) throws IOException { ) ); } + + private static KeywordFieldMapper getKeywordFieldMapperForSyntheticSource(Iterable multiFields) { + for (Mapper sub : multiFields) { + if (sub.typeName().equals(KeywordFieldMapper.CONTENT_TYPE)) { + KeywordFieldMapper kwd = (KeywordFieldMapper) sub; + if 
(kwd.hasNormalizer() == false && (kwd.fieldType().hasDocValues() || kwd.fieldType().isStored())) { + return kwd; + } + } + } + + return null; + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index fbfce6aab403f..6ab5d6d77d86d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -196,7 +196,11 @@ MappedFieldType failIfFieldMappingNotFound(String name, MappedFieldType fieldMap if (fieldMapping != null || allowUnmappedFields) { return fieldMapping; } else if (mapUnmappedFieldAsString) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, getIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + getIndexAnalyzers(), + getIndexSettings() != null && getIndexSettings().getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } else { throw new QueryShardException(this, "No field mapping can be found for the field with name [{}]", name); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 048d9adb8e7e3..b17777fc5a91e 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -72,6 +72,7 @@ import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.store.IndicesStore; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; @@ -83,7 +84,6 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Predicate; /** * Configures classes and services that are shared by indices on each node. @@ -307,18 +307,15 @@ public static Set getBuiltInMetadataFields() { return builtInMetadataFields; } - private static Function> getFieldFilter(List mapperPlugins) { - Function> fieldFilter = MapperPlugin.NOOP_FIELD_FILTER; + private static Function getFieldFilter(List mapperPlugins) { + Function fieldFilter = MapperPlugin.NOOP_FIELD_FILTER; for (MapperPlugin mapperPlugin : mapperPlugins) { fieldFilter = and(fieldFilter, mapperPlugin.getFieldFilter()); } return fieldFilter; } - private static Function> and( - Function> first, - Function> second - ) { + private static Function and(Function first, Function second) { // the purpose of this method is to not chain no-op field predicates, so that we can easily find out when no plugins plug in // a field filter, hence skip the mappings filtering part as a whole, as it requires parsing mappings into a map. 
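To make the composition and the no-op short-circuit concrete: the and() method below only allocates a FieldPredicate.And when both sides actually filter. A small hypothetical example (the secret-hiding predicate is invented for illustration) showing how And combines test() and threads the mapping hash through both modifyHash() calls, which keeps differently filtered views of the same mapping from colliding in hash-keyed caches such as the field-caps deduplication in FieldCapabilitiesFetcher earlier in this change:

import org.elasticsearch.plugins.FieldPredicate;

public class FieldPredicateExample {
    public static void main(String[] args) {
        FieldPredicate hideSecrets = new FieldPredicate() {
            @Override
            public boolean test(String field) {
                return field.startsWith("secret.") == false;
            }

            @Override
            public String modifyHash(String hash) {
                return "hide-secrets:" + hash; // salt the mapping hash with this filter's identity
            }

            @Override
            public long ramBytesUsed() {
                return 0;
            }
        };

        FieldPredicate combined = new FieldPredicate.And(hideSecrets, FieldPredicate.ACCEPT_ALL);
        System.out.println(combined.test("secret.token")); // false: hidden by the first predicate
        System.out.println(combined.modifyHash("abc123")); // hide-secrets:abc123 (ACCEPT_ALL leaves it unchanged)
    }
}

In practice and() would short-circuit the ACCEPT_ALL side away rather than wrap it; the wrapper appears here only to show And's behavior.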
if (first == MapperPlugin.NOOP_FIELD_FILTER) { @@ -328,15 +325,15 @@ private static Function> and( return first; } return index -> { - Predicate firstPredicate = first.apply(index); - Predicate secondPredicate = second.apply(index); - if (firstPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) { + FieldPredicate firstPredicate = first.apply(index); + FieldPredicate secondPredicate = second.apply(index); + if (firstPredicate == FieldPredicate.ACCEPT_ALL) { return secondPredicate; } - if (secondPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) { + if (secondPredicate == FieldPredicate.ACCEPT_ALL) { return firstPredicate; } - return firstPredicate.and(secondPredicate); + return new FieldPredicate.And(firstPredicate, secondPredicate); }; } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 3319b29df6dfa..026a20415aa91 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -128,6 +128,7 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.store.CompositeIndexFoldersDeletionListener; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesService; @@ -168,7 +169,6 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; -import java.util.function.Predicate; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -1756,7 +1756,7 @@ public void clearIndexShardCache(ShardId shardId, boolean queryCache, boolean fi * {@link org.elasticsearch.plugins.MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be * returned by get mappings, get index, get field mappings and field capabilities API. */ - public Function> getFieldFilter() { + public Function getFieldFilter() { return mapperRegistry.getFieldFilter(); } diff --git a/server/src/main/java/org/elasticsearch/plugins/FieldPredicate.java b/server/src/main/java/org/elasticsearch/plugins/FieldPredicate.java new file mode 100644 index 0000000000000..32692b9740f91 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/plugins/FieldPredicate.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugins; + +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.cluster.metadata.MappingMetadata; + +import java.util.function.Predicate; + +/** + * Filter for visible fields. + */ +public interface FieldPredicate extends Accountable, Predicate { + /** + * The default field predicate applied, which doesn't filter anything. That means that by default get mappings, get index + * get field mappings and field capabilities API will return every field that's present in the mappings. 
+ */ + FieldPredicate ACCEPT_ALL = new FieldPredicate() { + @Override + public boolean test(String field) { + return true; + } + + @Override + public String modifyHash(String hash) { + return hash; + } + + @Override + public long ramBytesUsed() { + return 0; // Shared + } + + @Override + public String toString() { + return "accept all"; + } + }; + + /** + * Should this field be included? + */ + @Override + boolean test(String field); + + /** + * Modify the {@link MappingMetadata#getSha256} to track any filtering this predicate + * has performed on the list of fields. + */ + String modifyHash(String hash); + + class And implements FieldPredicate { + private static final long SHALLOW_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(And.class); + + private final FieldPredicate first; + private final FieldPredicate second; + + public And(FieldPredicate first, FieldPredicate second) { + this.first = first; + this.second = second; + } + + @Override + public boolean test(String field) { + return first.test(field) && second.test(field); + } + + @Override + public String modifyHash(String hash) { + return second.modifyHash(first.modifyHash(hash)); + } + + @Override + public long ramBytesUsed() { + return SHALLOW_RAM_BYTES_USED + first.ramBytesUsed() + second.ramBytesUsed(); + } + + @Override + public String toString() { + return first + " then " + second; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java b/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java index 401c014488f88..45f04487886d3 100644 --- a/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/MapperPlugin.java @@ -8,7 +8,6 @@ package org.elasticsearch.plugins; -import org.elasticsearch.core.Predicates; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.RuntimeField; @@ -16,7 +15,6 @@ import java.util.Collections; import java.util.Map; import java.util.function.Function; -import java.util.function.Predicate; /** * An extension point for {@link Plugin} implementations to add custom mappers @@ -62,19 +60,23 @@ default Map getMetadataMappers() { * get index, get field mappings and field capabilities API. Useful to filter the fields that such API return. The predicate receives * the field name as input argument and should return true to show the field and false to hide it. */ - default Function> getFieldFilter() { + default Function getFieldFilter() { return NOOP_FIELD_FILTER; } - /** - * The default field predicate applied, which doesn't filter anything. That means that by default get mappings, get index - * get field mappings and field capabilities API will return every field that's present in the mappings. - */ - Predicate NOOP_FIELD_PREDICATE = Predicates.always(); - /** * The default field filter applied, which doesn't filter anything. That means that by default get mappings, get index * get field mappings and field capabilities API will return every field that's present in the mappings. 
*/ - Function> NOOP_FIELD_FILTER = index -> NOOP_FIELD_PREDICATE; + Function NOOP_FIELD_FILTER = new Function<>() { + @Override + public FieldPredicate apply(String index) { + return FieldPredicate.ACCEPT_ALL; + } + + @Override + public String toString() { + return "accept all"; + } + }; } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java index 7b3ea4fbe4581..b64383c562c50 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java @@ -11,23 +11,16 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; - -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; /** * The allowable types, languages and their corresponding contexts. When serialized there is a top level types_allowed list, @@ -68,10 +61,10 @@ * */ public class ScriptLanguagesInfo implements ToXContentObject, Writeable { - private static final ParseField TYPES_ALLOWED = new ParseField("types_allowed"); - private static final ParseField LANGUAGE_CONTEXTS = new ParseField("language_contexts"); - private static final ParseField LANGUAGE = new ParseField("language"); - private static final ParseField CONTEXTS = new ParseField("contexts"); + public static final ParseField TYPES_ALLOWED = new ParseField("types_allowed"); + public static final ParseField LANGUAGE_CONTEXTS = new ParseField("language_contexts"); + public static final ParseField LANGUAGE = new ParseField("language"); + public static final ParseField CONTEXTS = new ParseField("contexts"); public final Set typesAllowed; public final Map> languageContexts; @@ -86,31 +79,6 @@ public ScriptLanguagesInfo(StreamInput in) throws IOException { languageContexts = in.readImmutableMap(sin -> sin.readCollectionAsImmutableSet(StreamInput::readString)); } - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "script_languages_info", - true, - (a) -> new ScriptLanguagesInfo( - new HashSet<>((List) a[0]), - ((List>>) a[1]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)) - ) - ); - - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser>, Void> LANGUAGE_CONTEXT_PARSER = - new ConstructingObjectParser<>("language_contexts", true, (m, name) -> new Tuple<>((String) m[0], Set.copyOf((List) m[1]))); - - static { - PARSER.declareStringArray(constructorArg(), TYPES_ALLOWED); - PARSER.declareObjectArray(constructorArg(), LANGUAGE_CONTEXT_PARSER, LANGUAGE_CONTEXTS); - LANGUAGE_CONTEXT_PARSER.declareString(constructorArg(), LANGUAGE); - LANGUAGE_CONTEXT_PARSER.declareStringArray(constructorArg(), CONTEXTS); - } - - public static ScriptLanguagesInfo fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - 
@Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(typesAllowed); diff --git a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java index 5f8e6a893c1b5..e83fa79c79460 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java @@ -15,20 +15,16 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; -import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParserConstructor; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - public class SearchProfileDfsPhaseResult implements Writeable, ToXContentObject { private final ProfileResult dfsShardResult; @@ -63,24 +59,8 @@ public void writeTo(StreamOutput out) throws IOException { } } - private static final ParseField STATISTICS = new ParseField("statistics"); - private static final ParseField KNN = new ParseField("knn"); - private static final InstantiatingObjectParser PARSER; - - static { - InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( - "search_profile_dfs_phase_result", - true, - SearchProfileDfsPhaseResult.class - ); - parser.declareObject(optionalConstructorArg(), (p, c) -> ProfileResult.fromXContent(p), STATISTICS); - parser.declareObjectArray(optionalConstructorArg(), (p, c) -> QueryProfileShardResult.fromXContent(p), KNN); - PARSER = parser.build(); - } - - public static SearchProfileDfsPhaseResult fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } + public static final ParseField STATISTICS = new ParseField("statistics"); + public static final ParseField KNN = new ParseField("knn"); @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java b/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java index e72ef2d9b3ece..8aebde23d6a87 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -25,8 +24,6 @@ import java.util.List; import java.util.Objects; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - /** * A container class to hold the profile results for a single shard in the request. * Contains a list of query profiles, a collector tree and a total rewrite tree. 
@@ -139,42 +136,4 @@ public int hashCode() {
 
     public String toString() {
         return Strings.toString(this);
     }
-
-    public static QueryProfileShardResult fromXContent(XContentParser parser) throws IOException {
-        XContentParser.Token token = parser.currentToken();
-        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
-        String currentFieldName = null;
-        List<ProfileResult> queryProfileResults = new ArrayList<>();
-        long rewriteTime = 0;
-        Long vectorOperationsCount = null;
-        CollectorResult collector = null;
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token.isValue()) {
-                if (REWRITE_TIME.equals(currentFieldName)) {
-                    rewriteTime = parser.longValue();
-                } else if (VECTOR_OPERATIONS_COUNT.equals(currentFieldName)) {
-                    vectorOperationsCount = parser.longValue();
-                } else {
-                    parser.skipChildren();
-                }
-            } else if (token == XContentParser.Token.START_ARRAY) {
-                if (QUERY_ARRAY.equals(currentFieldName)) {
-                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
-                        queryProfileResults.add(ProfileResult.fromXContent(parser));
-                    }
-                } else if (COLLECTOR.equals(currentFieldName)) {
-                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
-                        collector = CollectorResult.fromXContent(parser);
-                    }
-                } else {
-                    parser.skipChildren();
-                }
-            } else {
-                parser.skipChildren();
-            }
-        }
-        return new QueryProfileShardResult(queryProfileResults, rewriteTime, collector, vectorOperationsCount);
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java b/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java
index 38363ee3e3fdd..c6ddd1964188f 100644
--- a/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java
+++ b/server/src/main/java/org/elasticsearch/search/runtime/AbstractBooleanScriptFieldQuery.java
@@ -23,7 +23,7 @@ abstract class AbstractBooleanScriptFieldQuery extends AbstractScriptFieldQuery<BooleanFieldScript> {
     }
 
     @Override
-    protected boolean matches(BooleanFieldScript scriptContext, int docId) {
+    protected final boolean matches(BooleanFieldScript scriptContext, int docId) {
         scriptContext.runForDoc(docId);
         return matches(scriptContext.trues(), scriptContext.falses());
     }
diff --git a/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java b/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java
index 500d00628bd19..722cff6fc0edf 100644
--- a/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java
+++ b/server/src/main/java/org/elasticsearch/search/runtime/AbstractDoubleScriptFieldQuery.java
@@ -22,7 +22,7 @@ abstract class AbstractDoubleScriptFieldQuery extends AbstractScriptFieldQuery<DoubleFieldScript> {
     }
 
     @Override
-    protected boolean matches(DoubleFieldScript scriptContext, int docId) {
+    protected final boolean matches(DoubleFieldScript scriptContext, int docId) {
         scriptContext.runForDoc(docId);
         return matches(scriptContext.values(), scriptContext.count());
     }
diff --git a/server/src/main/java/org/elasticsearch/search/runtime/AbstractStringScriptFieldAutomatonQuery.java b/server/src/main/java/org/elasticsearch/search/runtime/AbstractStringScriptFieldAutomatonQuery.java
--- a/server/src/main/java/org/elasticsearch/search/runtime/AbstractStringScriptFieldAutomatonQuery.java
+++ b/server/src/main/java/org/elasticsearch/search/runtime/AbstractStringScriptFieldAutomatonQuery.java
-    protected final boolean matches(List<String> values) {
-        BytesRefBuilder scratch = new BytesRefBuilder();
+    protected TwoPhaseIterator createTwoPhaseIterator(StringFieldScript scriptContext, DocIdSetIterator approximation) {
+        BytesRefBuilder scratch = new BytesRefBuilder();
+        return new TwoPhaseIterator(approximation) {
+            @Override
+            public boolean matches() {
+                scriptContext.runForDoc(approximation.docID());
+                return AbstractStringScriptFieldAutomatonQuery.this.matches(scriptContext.getValues(), scratch);
+            }
+
+            @Override
+            public float matchCost() {
+                return MATCH_COST;
+            }
+        };
+    }
+
+    protected final boolean matches(List<String> values, BytesRefBuilder scratch) {
         for (String value : values) {
             scratch.copyChars(value);
             if (automaton.run(scratch.bytes(), 0, scratch.length())) {
@@ -41,6 +58,11 @@ protected final boolean matches(List<String> values) {
         return false;
     }
 
+    @Override
+    protected final boolean matches(List<String> values) {
+        throw new UnsupportedOperationException();
+    }
+
     @Override
     public final void visit(QueryVisitor visitor) {
         if (visitor.acceptField(fieldName())) {
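// Editor's aside (illustration, not part of the patch): the override above moves the per-doc
// script run behind Lucene's two-phase contract. A consumer drives it like this; collect(doc)
// is a hypothetical stand-in for whatever the caller does with a confirmed match:
TwoPhaseIterator twoPhase = scorer.twoPhaseIterator(); // standard Lucene accessor
DocIdSetIterator approximation = twoPhase.approximation();
for (int doc = approximation.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = approximation.nextDoc()) {
    if (twoPhase.matches()) { // only now does the script run for this candidate
        collect(doc);
    }
}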
diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
index b392111557615..0a1480526c9f0 100644
--- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
@@ -66,6 +66,7 @@
 7.17.16,7171699
 7.17.17,7171799
 7.17.18,7171899
+7.17.19,7171999
 8.0.0,8000099
 8.0.1,8000199
 8.1.0,8010099
@@ -112,3 +113,4 @@
 8.12.0,8560000
 8.12.1,8560001
 8.12.2,8560001
+8.13.0,8595000
diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
index f2da9fcaf60ce..f66cda3c08fc7 100644
--- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
@@ -66,6 +66,7 @@
 7.17.16,7171699
 7.17.17,7171799
 7.17.18,7171899
+7.17.19,7171999
 8.0.0,8000099
 8.0.1,8000199
 8.1.0,8010099
@@ -112,3 +113,4 @@
 8.12.0,8500008
 8.12.1,8500010
 8.12.2,8500010
+8.13.0,8503000
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
index d4231c9f7538b..11655a93097cc 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
@@ -110,7 +110,7 @@ public class ClusterHealthResponsesTests extends AbstractXContentSerializingTestCase<ClusterHealthResponse> {
     private static final ObjectParser.NamedObjectParser<ClusterIndexHealth, Void> INDEX_PARSER = (
         XContentParser parser,
         Void context,
-        String index) -> ClusterIndexHealth.innerFromXContent(parser, index);
+        String index) -> ClusterIndexHealthTests.parseInstance(parser, index);
 
     static {
         // ClusterStateHealth fields
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java
index 4980d0f786d84..50f230022b375 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexStatusTests.java
@@ -8,17 +8,63 @@
 
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
+import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.common.xcontent.XContentParserUtils;
 import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.function.Predicate;
 
+import static java.util.Collections.emptyMap;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
 public class SnapshotIndexStatusTests extends AbstractXContentTestCase<SnapshotIndexStatus> {
 
+    static final ObjectParser.NamedObjectParser<SnapshotIndexStatus, Void> PARSER;
+    static {
+        ConstructingObjectParser<SnapshotIndexStatus, String> innerParser = new ConstructingObjectParser<>(
+            "snapshot_index_status",
+            true,
+            (Object[] parsedObjects, String index) -> {
+                int i = 0;
+                SnapshotShardsStats shardsStats = ((SnapshotShardsStats) parsedObjects[i++]);
+                SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]);
+                @SuppressWarnings("unchecked")
+                List<SnapshotIndexShardStatus> shardStatuses = (List<SnapshotIndexShardStatus>) parsedObjects[i];
+
+                final Map<Integer, SnapshotIndexShardStatus> indexShards;
+                if (shardStatuses == null || shardStatuses.isEmpty()) {
+                    indexShards = emptyMap();
+                } else {
+                    indexShards = Maps.newMapWithExpectedSize(shardStatuses.size());
+                    for (SnapshotIndexShardStatus shardStatus : shardStatuses) {
+                        indexShards.put(shardStatus.getShardId().getId(), shardStatus);
+                    }
+                }
+                return new SnapshotIndexStatus(index, indexShards, shardsStats, stats);
+            }
+        );
+        innerParser.declareObject(
+            constructorArg(),
+            (p, c) -> SnapshotShardsStatsTests.PARSER.apply(p, null),
+            new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS)
+        );
+        innerParser.declareObject(constructorArg(), (p, c) -> SnapshotStats.fromXContent(p), new ParseField(SnapshotStats.Fields.STATS));
+        innerParser.declareNamedObjects(
+            constructorArg(),
+            SnapshotIndexShardStatus.PARSER,
+            new ParseField(SnapshotIndexStatus.Fields.SHARDS)
+        );
+        PARSER = ((p, c, name) -> innerParser.apply(p, name));
+    }
+
     @Override
     protected SnapshotIndexStatus createTestInstance() {
         String index = randomAlphaOfLength(10);
@@ -40,7 +86,8 @@ protected Predicate<String> getRandomFieldsExcludeFilter() {
     protected SnapshotIndexStatus doParseInstance(XContentParser parser) throws IOException {
         XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
         XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser);
-        SnapshotIndexStatus status = SnapshotIndexStatus.fromXContent(parser);
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser);
+        SnapshotIndexStatus status = PARSER.parse(parser, null, parser.currentName());
         XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
         return status;
     }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java
index 9d4b8d601c63b..a9eacb49798f9 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStatsTests.java
@@ -9,12 +9,39 @@
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
 import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
 public class SnapshotShardsStatsTests extends AbstractXContentTestCase<SnapshotShardsStats> {
 
+    static final ConstructingObjectParser<SnapshotShardsStats, Void> PARSER = new ConstructingObjectParser<>(
+        SnapshotShardsStats.Fields.SHARDS_STATS,
+        true,
+        (Object[] parsedObjects) -> {
+            int i = 0;
+            int initializingShards = (int) parsedObjects[i++];
+            int startedShards = (int) parsedObjects[i++];
+            int finalizingShards = (int) parsedObjects[i++];
+            int doneShards = (int) parsedObjects[i++];
+            int failedShards = (int) parsedObjects[i++];
+            int totalShards = (int) parsedObjects[i];
+            return new SnapshotShardsStats(initializingShards, startedShards, finalizingShards, doneShards, failedShards, totalShards);
+        }
+    );
+    static {
+        PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.INITIALIZING));
+        PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.STARTED));
+        PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.FINALIZING));
+        PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.DONE));
+        PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.FAILED));
+        PARSER.declareInt(constructorArg(), new ParseField(SnapshotShardsStats.Fields.TOTAL));
+    }
+
     @Override
     protected SnapshotShardsStats createTestInstance() {
         int initializingShards = randomInt();
@@ -28,7 +55,7 @@ protected SnapshotShardsStats createTestInstance() {
 
     @Override
     protected SnapshotShardsStats doParseInstance(XContentParser parser) throws IOException {
-        return SnapshotShardsStats.fromXContent(parser);
+        return PARSER.apply(parser, null);
     }
 
     @Override
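// Editor's aside (illustration, not part of the patch): with the parser now owned by the test,
// a round trip stays entirely in test scope. The field names follow SnapshotShardsStats.Fields
// and the getter is assumed from the production class:
String json = "{\"initializing\":1,\"started\":2,\"finalizing\":3,\"done\":4,\"failed\":5,\"total\":15}";
try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) {
    SnapshotShardsStats stats = SnapshotShardsStatsTests.PARSER.apply(parser, null);
    assertEquals(15, stats.getTotalShards());
}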
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java
index 9c28930f12382..a32a66a55454f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatusTests.java
@@ -11,20 +11,79 @@
 import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.function.Predicate;
 
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
 public class SnapshotStatusTests extends AbstractChunkedSerializingTestCase<SnapshotStatus> {
 
+    static final ConstructingObjectParser<SnapshotStatus, Void> PARSER = new ConstructingObjectParser<>(
+        "snapshot_status",
+        true,
+        (Object[] parsedObjects) -> {
+            int i = 0;
+            String name = (String) parsedObjects[i++];
+            String repository = (String) parsedObjects[i++];
+            String uuid = (String) parsedObjects[i++];
+            String rawState = (String) parsedObjects[i++];
+            Boolean includeGlobalState = (Boolean) parsedObjects[i++];
+            SnapshotStats stats = ((SnapshotStats) parsedObjects[i++]);
+            SnapshotShardsStats shardsStats = ((SnapshotShardsStats) parsedObjects[i++]);
+            @SuppressWarnings("unchecked")
+            List<SnapshotIndexStatus> indices = ((List<SnapshotIndexStatus>) parsedObjects[i]);
+
+            Snapshot snapshot = new Snapshot(repository, new SnapshotId(name, uuid));
+            SnapshotsInProgress.State state = SnapshotsInProgress.State.valueOf(rawState);
+            Map<String, SnapshotIndexStatus> indicesStatus;
+            List<SnapshotIndexShardStatus> shards;
+            if (indices == null || indices.isEmpty()) {
+                indicesStatus = emptyMap();
+                shards = emptyList();
+            } else {
+                indicesStatus = Maps.newMapWithExpectedSize(indices.size());
+                shards = new ArrayList<>();
+                for (SnapshotIndexStatus index : indices) {
+                    indicesStatus.put(index.getIndex(), index);
+                    shards.addAll(index.getShards().values());
+                }
+            }
+            return new SnapshotStatus(snapshot, state, shards, indicesStatus, shardsStats, stats, includeGlobalState);
+        }
+    );
+    static {
+        PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.SNAPSHOT));
+        PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.REPOSITORY));
+        PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.UUID));
+        PARSER.declareString(constructorArg(), new ParseField(SnapshotStatus.STATE));
+        PARSER.declareBoolean(optionalConstructorArg(), new ParseField(SnapshotStatus.INCLUDE_GLOBAL_STATE));
+        PARSER.declareField(
+            constructorArg(),
+            SnapshotStats::fromXContent,
+            new ParseField(SnapshotStats.Fields.STATS),
+            ObjectParser.ValueType.OBJECT
+        );
+        PARSER.declareObject(constructorArg(), SnapshotShardsStatsTests.PARSER, new ParseField(SnapshotShardsStats.Fields.SHARDS_STATS));
+        PARSER.declareNamedObjects(constructorArg(), SnapshotIndexStatusTests.PARSER, new ParseField(SnapshotStatus.INDICES));
+    }
+
     public void testToString() throws Exception {
         SnapshotsInProgress.State state = randomFrom(SnapshotsInProgress.State.values());
         String uuid = UUIDs.randomBase64UUID();
@@ -180,7 +239,7 @@ protected Predicate<String> getRandomFieldsExcludeFilter() {
 
     @Override
     protected SnapshotStatus doParseInstance(XContentParser parser) throws IOException {
-        return SnapshotStatus.fromXContent(parser);
+        return PARSER.parse(parser, null);
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java
index 21cba892669d0..6b921419c0fd4 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponseTests.java
@@ -33,7 +33,7 @@ public class SnapshotsStatusResponseTests extends AbstractChunkedSerializingTestCase<SnapshotsStatusResponse> {
         }
     );
     static {
-        PARSER.declareObjectArray(constructorArg(), SnapshotStatus.PARSER, new ParseField("snapshots"));
+        PARSER.declareObjectArray(constructorArg(), SnapshotStatusTests.PARSER, new ParseField("snapshots"));
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java
index f8d3871fbfa8f..ec56a57aa3a90 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java
@@ -10,8 +10,10 @@
 
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.core.Tuple;
 import org.elasticsearch.script.ScriptLanguagesInfo;
 import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
 import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
@@ -22,8 +24,33 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
 
 public class GetScriptLanguageResponseTests extends AbstractXContentSerializingTestCase<GetScriptLanguageResponse> {
 
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<ScriptLanguagesInfo, Void> PARSER = new ConstructingObjectParser<>(
+        "script_languages_info",
+        true,
+        (a) -> new ScriptLanguagesInfo(
+            new HashSet<>((List<String>) a[0]),
+            ((List<Tuple<String, Set<String>>>) a[1]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2))
+        )
+    );
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<Tuple<String, Set<String>>, Void> LANGUAGE_CONTEXT_PARSER =
+        new ConstructingObjectParser<>("language_contexts", true, (m, name) -> new Tuple<>((String) m[0], Set.copyOf((List<String>) m[1])));
+
+    static {
+        PARSER.declareStringArray(constructorArg(), ScriptLanguagesInfo.TYPES_ALLOWED);
+        PARSER.declareObjectArray(constructorArg(), LANGUAGE_CONTEXT_PARSER, ScriptLanguagesInfo.LANGUAGE_CONTEXTS);
+        LANGUAGE_CONTEXT_PARSER.declareString(constructorArg(), ScriptLanguagesInfo.LANGUAGE);
+        LANGUAGE_CONTEXT_PARSER.declareStringArray(constructorArg(), ScriptLanguagesInfo.CONTEXTS);
+    }
+
     private static int MAX_VALUES = 4;
     private static final int MIN_LENGTH = 1;
     private static final int MAX_LENGTH = 16;
@@ -38,7 +65,7 @@ protected GetScriptLanguageResponse createTestInstance() {
 
     @Override
     protected GetScriptLanguageResponse doParseInstance(XContentParser parser) throws IOException {
-        return new GetScriptLanguageResponse(ScriptLanguagesInfo.fromXContent(parser));
+        return new GetScriptLanguageResponse(PARSER.parse(parser, null));
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java
index f0802e471fc38..8cf8a1c064004 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponseTests.java
@@ -25,7 +25,6 @@
 import java.util.Map;
 import java.util.Set;
 
-import static org.elasticsearch.action.support.broadcast.BaseBroadcastResponse.declareBroadcastFields;
 import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
 
 public class ReloadAnalyzersResponseTests extends AbstractBroadcastResponseTestCase<ReloadAnalyzersResponse> {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java
index 9ec910e79918c..5df0fa27f1016 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponseTests.java
@@ -25,7 +25,6 @@
 import java.util.List;
 import java.util.Set;
 
-import static org.elasticsearch.action.support.broadcast.BaseBroadcastResponse.declareBroadcastFields;
 import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
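// Editor's aside (illustration, not part of the patch): where no declarative parser fits, for
// example SnapshotIndexStatusTests above, whose top-level JSON key is the index name, the tests
// drive the cursor by hand. The skeleton of that idiom ("my-index" and the JSON are shape only):
String json = "{\"my-index\":{ /* body */ }}";
try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) {
    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); // enter wrapper object
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser);   // cursor on the index-name key
    SnapshotIndexStatus status = PARSER.parse(parser, null, parser.currentName());      // delegate with the key as name
    ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);   // leave wrapper object
}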
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java
index 6c45367baf674..7c50ba3beae76 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java
@@ -17,12 +17,15 @@
 import org.elasticsearch.action.delete.DeleteResponseTests;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.index.IndexResponseTests;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.action.update.UpdateResponseTests;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.core.Tuple;
+import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;
@@ -43,6 +46,54 @@ public class BulkItemResponseTests extends ESTestCase {
 
+    /**
+     * Parse the output of the {@link DocWriteResponse#innerToXContent(XContentBuilder, ToXContent.Params)} method.
+     *
+     * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning
+     * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
+     * if needed and then immediately returns.
+     */
+    public static void parseInnerToXContent(XContentParser parser, DocWriteResponse.Builder context) throws IOException {
+        XContentParser.Token token = parser.currentToken();
+        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
+
+        String currentFieldName = parser.currentName();
+        token = parser.nextToken();
+
+        if (token.isValue()) {
+            if (DocWriteResponse._INDEX.equals(currentFieldName)) {
+                // index uuid and shard id are unknown and can't be parsed back for now.
+                context.setShardId(new ShardId(new Index(parser.text(), IndexMetadata.INDEX_UUID_NA_VALUE), -1));
+            } else if (DocWriteResponse._ID.equals(currentFieldName)) {
+                context.setId(parser.text());
+            } else if (DocWriteResponse._VERSION.equals(currentFieldName)) {
+                context.setVersion(parser.longValue());
+            } else if (DocWriteResponse.RESULT.equals(currentFieldName)) {
+                String result = parser.text();
+                for (DocWriteResponse.Result r : DocWriteResponse.Result.values()) {
+                    if (r.getLowercase().equals(result)) {
+                        context.setResult(r);
+                        break;
+                    }
+                }
+            } else if (DocWriteResponse.FORCED_REFRESH.equals(currentFieldName)) {
+                context.setForcedRefresh(parser.booleanValue());
+            } else if (DocWriteResponse._SEQ_NO.equals(currentFieldName)) {
+                context.setSeqNo(parser.longValue());
+            } else if (DocWriteResponse._PRIMARY_TERM.equals(currentFieldName)) {
+                context.setPrimaryTerm(parser.longValue());
+            }
+        } else if (token == XContentParser.Token.START_OBJECT) {
+            if (DocWriteResponse._SHARDS.equals(currentFieldName)) {
+                context.setShardInfo(ReplicationResponse.ShardInfo.fromXContent(parser));
+            } else {
+                parser.skipChildren(); // skip potential inner objects for forward compatibility
+            }
+        } else if (token == XContentParser.Token.START_ARRAY) {
+            parser.skipChildren(); // skip potential inner arrays for forward compatibility
+        }
+    }
+
     public void testBulkItemResponseShouldContainTypeInV7CompatibilityMode() throws IOException {
         BulkItemResponse bulkItemResponse = BulkItemResponse.success(
             randomInt(),
@@ -192,7 +243,7 @@ public static BulkItemResponse itemResponseFromXContent(XContentParser parser, int id) {
         if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
             final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder();
             builder = indexResponseBuilder;
-            itemParser = indexParser -> DocWriteResponse.parseInnerToXContent(indexParser, indexResponseBuilder);
+            itemParser = indexParser -> parseInnerToXContent(indexParser, indexResponseBuilder);
         } else if (opType == DocWriteRequest.OpType.UPDATE) {
             final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder();
             builder = updateResponseBuilder;
@@ -201,7 +252,7 @@ public static BulkItemResponse itemResponseFromXContent(XContentParser parser, int id) {
         } else if (opType == DocWriteRequest.OpType.DELETE) {
             final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder();
             builder = deleteResponseBuilder;
-            itemParser = deleteParser -> DocWriteResponse.parseInnerToXContent(deleteParser, deleteResponseBuilder);
+            itemParser = deleteParser -> parseInnerToXContent(deleteParser, deleteResponseBuilder);
         } else {
             throwUnknownField(currentFieldName, parser);
         }
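// Editor's aside (illustration, not part of the patch): parseInnerToXContent above is
// deliberately token-at-a-time; the caller owns the object cursor and invokes it once per
// field, then builds. The calling convention, mirroring DeleteResponseTests later in this diff:
IndexResponse.Builder context = new IndexResponse.Builder();
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
    BulkItemResponseTests.parseInnerToXContent(parser, context); // consumes exactly one field
}
IndexResponse response = context.build();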
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java
new file mode 100644
index 0000000000000..2226c40b618f4
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java
@@ -0,0 +1,870 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.internal.node.NodeClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateObserver;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.coordination.NoMasterBlockService;
+import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
+import org.elasticsearch.cluster.metadata.DataStream;
+import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.metadata.Template;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.core.CheckedFunction;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.mapper.MapperException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.SystemIndices;
+import org.elasticsearch.node.NodeClosedException;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.client.NoOpNodeClient;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class BulkOperationTests extends ESTestCase {
+
+    private final long millis = randomMillisUpToYear9999();
+    private final String indexName = "my_index";
+    private final String dataStreamName = "my_data_stream";
+    private final String fsDataStreamName = "my_failure_store_data_stream";
+
+    private final IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
+        .settings(
+            Settings.builder()
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+                .build()
+        )
+        .build();
+    private final IndexMetadata ds1BackingIndex1 = DataStreamTestHelper.createBackingIndex(dataStreamName, 1, millis)
+        .numberOfShards(2)
+        .build();
+    private final IndexMetadata ds1BackingIndex2 = DataStreamTestHelper.createBackingIndex(dataStreamName, 2, millis + 1)
+        .numberOfShards(2)
+        .build();
+    private final IndexMetadata ds2BackingIndex1 = DataStreamTestHelper.createBackingIndex(fsDataStreamName, 1, millis)
+        .numberOfShards(2)
+        .build();
+    private final IndexMetadata ds2FailureStore1 = DataStreamTestHelper.createFailureStore(fsDataStreamName, 1, millis)
+        .numberOfShards(1)
+        .build();
+
+    private final DataStream dataStream1 = DataStreamTestHelper.newInstance(
+        dataStreamName,
+        List.of(ds1BackingIndex1.getIndex(), ds1BackingIndex2.getIndex())
+    );
+    private final DataStream dataStream2 = DataStreamTestHelper.newInstance(
+        fsDataStreamName,
+        List.of(ds2BackingIndex1.getIndex()),
+        List.of(ds2FailureStore1.getIndex())
+    );
+
+    private final ClusterState DEFAULT_STATE = ClusterState.builder(ClusterName.DEFAULT)
+        .metadata(
+            Metadata.builder()
+                .indexTemplates(
+                    Map.of(
+                        "ds-template",
+                        ComposableIndexTemplate.builder()
+                            .indexPatterns(List.of(dataStreamName))
+                            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, false))
+                            .template(new Template(null, null, null, null))
+                            .build(),
+                        "ds-template-with-failure-store",
+                        ComposableIndexTemplate.builder()
+                            .indexPatterns(List.of(fsDataStreamName))
+                            .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false, true))
+                            .template(new Template(null, null, null, null))
+                            .build()
+                    )
+                )
+                .indices(
+                    Map.of(
+                        indexName,
+                        indexMetadata,
+                        ds1BackingIndex1.getIndex().getName(),
+                        ds1BackingIndex1,
+                        ds1BackingIndex2.getIndex().getName(),
+                        ds1BackingIndex2,
+                        ds2BackingIndex1.getIndex().getName(),
+                        ds2BackingIndex1,
+                        ds2FailureStore1.getIndex().getName(),
+                        ds2FailureStore1
+                    )
+                )
+                .dataStreams(Map.of(dataStreamName, dataStream1, fsDataStreamName, dataStream2), Map.of())
+                .build()
+        )
+        .build();
+
+    private TestThreadPool threadPool;
+
+    @Before
+    public void setupThreadpool() {
+        threadPool = new TestThreadPool(getClass().getName());
+    }
+
+    @After
+    public void tearDownThreadpool() {
+        terminate(threadPool);
+    }
+
+    /**
+     * If a bulk operation begins and the cluster is experiencing a non-retryable block, the bulk operation should fail
+     */
+    public void testClusterBlockedFailsBulk() {
+        NodeClient client = getNodeClient((r) -> {
+            fail("Should not have executed shard action on blocked cluster");
+            return null;
+        });
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        // Not retryable
+        ClusterState state = ClusterState.builder(DEFAULT_STATE)
+            .blocks(ClusterBlocks.builder().addGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK).build())
+            .build();
+
+        // Make sure we don't wait at all
+        ClusterStateObserver observer = mock(ClusterStateObserver.class);
+        when(observer.setAndGetObservedState()).thenReturn(state);
+        when(observer.isTimedOut()).thenReturn(false);
+        doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any());
+
+        newBulkOperation(client, new BulkRequest(), state, observer, listener).run();
+
+        expectThrows(ExecutionException.class, ClusterBlockException.class, future::get);
+    }
+
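// Editor's aside (illustration, not part of the patch): every test in this class bridges the
// async BulkOperation into blocking assertions the same way; the two-class expectThrows
// overload asserts on the cause wrapped inside the ExecutionException:
CompletableFuture<BulkResponse> future = new CompletableFuture<>();
ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
newBulkOperation(client, new BulkRequest(), listener).run();
BulkResponse response = future.get(); // success path
// or: expectThrows(ExecutionException.class, ClusterBlockException.class, future::get);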
+    /**
+     * If a bulk operation times out while waiting for cluster blocks to be cleared, it should fail the request.
+     */
+    public void testTimeoutOnRetryableClusterBlockedFailsBulk() {
+        NodeClient client = getNodeClient((r) -> {
+            fail("Should not have executed shard action on blocked cluster");
+            return null;
+        });
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        // Retryable
+        final ClusterState state = ClusterState.builder(DEFAULT_STATE)
+            .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build())
+            .build();
+
+        // Always return cluster state, first observation: return same cluster state, second observation: time out, ensure no further wait
+        ClusterStateObserver observer = mock(ClusterStateObserver.class);
+        when(observer.setAndGetObservedState()).thenReturn(state);
+        when(observer.isTimedOut()).thenReturn(false, true);
+        doAnswer((i) -> {
+            // Returning same state or timing out will result in one more attempt.
+            if (randomBoolean()) {
+                i.getArgument(0, ClusterStateObserver.Listener.class).onNewClusterState(state);
+            } else {
+                i.getArgument(0, ClusterStateObserver.Listener.class).onTimeout(null);
+            }
+            return null;
+        }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any());
+
+        newBulkOperation(client, new BulkRequest(), state, observer, listener).run();
+
+        expectThrows(ExecutionException.class, ClusterBlockException.class, future::get);
+        verify(observer, times(2)).isTimedOut();
+        verify(observer, times(1)).waitForNextChange(any());
+    }
+
+    /**
+     * If the cluster service closes while a bulk operation is waiting for cluster blocks to be cleared, it should fail the request.
+     */
+    public void testNodeClosedOnRetryableClusterBlockedFailsBulk() {
+        NodeClient client = getNodeClient((r) -> {
+            fail("Should not have executed shard action on blocked cluster");
+            return null;
+        });
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        // Retryable
+        final ClusterState state = ClusterState.builder(DEFAULT_STATE)
+            .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build())
+            .build();
+
+        // Always return cluster state, first observation: signal cluster service closed, ensure no further wait
+        ClusterStateObserver observer = mock(ClusterStateObserver.class);
+        when(observer.setAndGetObservedState()).thenReturn(state);
+        when(observer.isTimedOut()).thenReturn(false);
+        doAnswer((i) -> {
+            i.getArgument(0, ClusterStateObserver.Listener.class).onClusterServiceClose();
+            return null;
+        }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any());
+
+        newBulkOperation(client, new BulkRequest(), state, observer, listener).run();
+
+        expectThrows(ExecutionException.class, NodeClosedException.class, future::get);
+        verify(observer, times(1)).isTimedOut();
+        verify(observer, times(1)).waitForNextChange(any());
+    }
+
+    /**
+     * A bulk operation to an index should succeed if all of its shard level requests succeed
+     */
+    public void testBulkToIndex() throws Exception {
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(indexName).id("1").source(Map.of("key", "val")));
+        bulkRequest.add(new IndexRequest(indexName).id("3").source(Map.of("key", "val")));
+
+        NodeClient client = getNodeClient(this::acceptAllShardWrites);
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(false));
+    }
+
+    /**
+     * A bulk operation to an index should partially succeed if only some of its shard level requests fail
+     */
+    public void testBulkToIndexFailingEntireShard() throws Exception {
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(indexName).id("1").source(Map.of("key", "val")));
+        bulkRequest.add(new IndexRequest(indexName).id("3").source(Map.of("key", "val")));
+
+        NodeClient client = getNodeClient(
+            failingShards(Map.of(new ShardId(indexMetadata.getIndex(), 0), () -> new MapperException("test")))
+        );
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(true));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(BulkItemResponse::isFailed)
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find failed item"));
+        assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class)));
+        assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("test")));
+    }
+
+    /**
+     * A bulk operation to a data stream should succeed if all of its shard level requests succeed
+     */
+    public void testBulkToDataStream() throws Exception {
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(dataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(dataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        NodeClient client = getNodeClient(this::acceptAllShardWrites);
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(false));
+    }
+
+    /**
+     * A bulk operation to a data stream should partially succeed if only some of its shard level requests fail
+     */
+    public void testBulkToDataStreamFailingEntireShard() throws Exception {
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(dataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(dataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        NodeClient client = getNodeClient(
+            failingShards(Map.of(new ShardId(ds1BackingIndex2.getIndex(), 0), () -> new MapperException("test")))
+        );
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(true));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(BulkItemResponse::isFailed)
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find failed item"));
+        assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class)));
+        assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("test")));
+    }
+
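// Editor's aside (illustration, not part of the patch): the failure-store tests below only run
// when the feature flag is enabled; JUnit's Assume machinery reports them as skipped, not failed:
Assume.assumeTrue(DataStream.isFailureStoreEnabled()); // short-circuits the test body when the flag is off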
+    /**
+     * A bulk operation to a data stream with a failure store enabled should redirect any shard level failures to the failure store.
+     */
+    public void testFailingEntireShardRedirectsToFailureStore() throws Exception {
+        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        NodeClient client = getNodeClient(
+            failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("test")))
+        );
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(false));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName()))
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find redirected item"));
+        assertThat(failedItem, is(notNullValue()));
+    }
+
+    /**
+     * A bulk operation to a data stream with a failure store enabled should redirect any documents that fail at a shard level to the
+     * failure store.
+     */
+    public void testFailingDocumentRedirectsToFailureStore() throws Exception {
+        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        NodeClient client = getNodeClient(
+            thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("test")))
+        );
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(false));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName()))
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find redirected item"));
+        assertThat(failedItem.getIndex(), is(notNullValue()));
+    }
+
+    /**
+     * A bulk operation to a data stream with a failure store enabled may still partially fail if the redirected documents experience
+     * a shard-level failure while writing to the failure store indices.
+     */
+    public void testFailureStoreShardFailureRejectsDocument() throws Exception {
+        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of
+        // the failure store index.
+        NodeClient client = getNodeClient(
+            failingShards(
+                Map.of(
+                    new ShardId(ds2BackingIndex1.getIndex(), 0),
+                    () -> new MapperException("root cause"),
+                    new ShardId(ds2FailureStore1.getIndex(), 0),
+                    () -> new MapperException("failure store test failure")
+                )
+            )
+        );
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(true));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(BulkItemResponse::isFailed)
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find redirected item"));
+        assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class)));
+        assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause")));
+        assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0))));
+        assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(MapperException.class)));
+        assertThat(failedItem.getFailure().getCause().getSuppressed()[0].getMessage(), is(equalTo("failure store test failure")));
+    }
+
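// Editor's aside (illustration, not part of the patch): the assertions above lean on Java's
// standard suppressed-exception mechanism; the redirect path keeps the original shard failure
// as the primary cause and attaches the failure-store failure to it:
Exception rootCause = new MapperException("root cause");
rootCause.addSuppressed(new MapperException("failure store test failure"));
assert rootCause.getSuppressed()[0].getMessage().equals("failure store test failure");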
+    /**
+     * A document that fails at the shard level will be converted into a failure document if an applicable failure store is present.
+     * In the unlikely case that the failure document cannot be created, the document will not be redirected to the failure store and
+     * instead will simply report its original failure in the response, with the conversion failure present as a suppressed exception.
+     */
+    public void testFailedDocumentCanNotBeConvertedFails() throws Exception {
+        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        NodeClient client = getNodeClient(
+            thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("root cause")))
+        );
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        // Mock a failure store document converter that always fails
+        FailureStoreDocumentConverter mockConverter = mock(FailureStoreDocumentConverter.class);
+        when(mockConverter.transformFailedRequest(any(), any(), any(), any())).thenThrow(new IOException("Could not serialize json"));
+
+        newBulkOperation(client, bulkRequest, mockConverter, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(true));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(BulkItemResponse::isFailed)
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find redirected item"));
+        assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class)));
+        assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause")));
+        assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0))));
+        assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(IOException.class)));
+        assertThat(failedItem.getFailure().getCause().getSuppressed()[0].getMessage(), is(equalTo("Could not serialize json")));
+    }
+
+    /**
+     * A bulk operation to a data stream with a failure store enabled may still partially fail if the cluster is experiencing a
+     * non-retryable block when the redirected documents would be sent to the shard-level action.
+     */
+    public void testBlockedClusterRejectsFailureStoreDocument() throws Exception {
+        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of
+        // the failure store index.
+        NodeClient client = getNodeClient(
+            failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("root cause")))
+        );
+
+        // Create a new cluster state that has a non-retryable cluster block on it
+        ClusterState blockedState = ClusterState.builder(DEFAULT_STATE)
+            .blocks(ClusterBlocks.builder().addGlobalBlock(IndexMetadata.INDEX_READ_ONLY_BLOCK).build())
+            .build();
+
+        // First time we will return the normal cluster state (before normal writes) which skips any further interactions,
+        // Second time we will return a blocked cluster state (before the redirects) causing us to start observing the cluster
+        // Finally, we will simulate the observer timing out causing the redirects to fail.
+        ClusterStateObserver observer = mock(ClusterStateObserver.class);
+        when(observer.setAndGetObservedState()).thenReturn(DEFAULT_STATE).thenReturn(blockedState);
+        when(observer.isTimedOut()).thenReturn(false);
+        doThrow(new AssertionError("Should not wait on non retryable block")).when(observer).waitForNextChange(any());
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(true));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(BulkItemResponse::isFailed)
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find redirected item"));
+        assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class)));
+        assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause")));
+        assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0))));
+        assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(ClusterBlockException.class)));
+        assertThat(
+            failedItem.getFailure().getCause().getSuppressed()[0].getMessage(),
+            is(equalTo("blocked by: [FORBIDDEN/5/index read-only (api)];"))
+        );
+
+        verify(observer, times(0)).isTimedOut();
+        verify(observer, times(0)).waitForNextChange(any());
+    }
+
+    /**
+     * A bulk operation to a data stream with a failure store enabled may still partially fail if the cluster times out while waiting for a
+     * retryable block to clear when the redirected documents would be sent to the shard-level action.
+     */
+    public void testOperationTimeoutRejectsFailureStoreDocument() throws Exception {
+        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of
+        // the failure store index.
+        NodeClient client = getNodeClient(
+            failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("root cause")))
+        );
+
+        // Create a new cluster state that has a retryable cluster block on it
+        ClusterState blockedState = ClusterState.builder(DEFAULT_STATE)
+            .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build())
+            .build();
+
+        // First time we will return the normal cluster state (before normal writes) which skips any further interactions,
+        // Second time we will return a blocked cluster state (before the redirects) causing us to start observing the cluster
+        // Finally, we will simulate the observer timing out causing the redirects to fail.
+        ClusterStateObserver observer = mock(ClusterStateObserver.class);
+        when(observer.setAndGetObservedState()).thenReturn(DEFAULT_STATE).thenReturn(blockedState);
+        when(observer.isTimedOut()).thenReturn(false, true);
+        doAnswer((i) -> {
+            // Returning same state or timing out will result in one more attempt.
+            if (randomBoolean()) {
+                i.getArgument(0, ClusterStateObserver.Listener.class).onNewClusterState(blockedState);
+            } else {
+                i.getArgument(0, ClusterStateObserver.Listener.class).onTimeout(null);
+            }
+            return null;
+        }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any());
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run();
+
+        BulkResponse bulkItemResponses = future.get();
+        assertThat(bulkItemResponses.hasFailures(), is(true));
+        BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems())
+            .filter(BulkItemResponse::isFailed)
+            .findFirst()
+            .orElseThrow(() -> new AssertionError("Could not find redirected item"));
+        assertThat(failedItem.getFailure().getCause(), is(instanceOf(MapperException.class)));
+        assertThat(failedItem.getFailure().getCause().getMessage(), is(equalTo("root cause")));
+        assertThat(failedItem.getFailure().getCause().getSuppressed().length, is(not(equalTo(0))));
+        assertThat(failedItem.getFailure().getCause().getSuppressed()[0], is(instanceOf(ClusterBlockException.class)));
+        assertThat(
+            failedItem.getFailure().getCause().getSuppressed()[0].getMessage(),
+            is(equalTo("blocked by: [SERVICE_UNAVAILABLE/2/no master];"))
+        );
+
+        verify(observer, times(2)).isTimedOut();
+        verify(observer, times(1)).waitForNextChange(any());
+    }
+
+    /**
+     * A bulk operation to a data stream with a failure store enabled may completely fail if the cluster service closes out while waiting
+     * for a retryable block to clear when the redirected documents would be sent to the shard-level action.
+     */
+    public void testNodeClosureRejectsFailureStoreDocument() {
+        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+
+        // Requests that go to two separate shards
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+        bulkRequest.add(new IndexRequest(fsDataStreamName).id("3").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
+
+        // Mock client that rejects all shard requests on the first shard in the backing index, and all requests to the only shard of
+        // the failure store index.
+        NodeClient client = getNodeClient(
+            failingShards(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), () -> new MapperException("root cause")))
+        );
+
+        // Create a new cluster state that has a retryable cluster block on it
+        ClusterState blockedState = ClusterState.builder(DEFAULT_STATE)
+            .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build())
+            .build();
+
+        // First time we will return the normal cluster state (before normal writes) which skips any further interactions,
+        // Second time we will return a blocked cluster state (before the redirects) causing us to start observing the cluster
+        // Finally, we will simulate the node closing causing the redirects to fail.
+        ClusterStateObserver observer = mock(ClusterStateObserver.class);
+        when(observer.setAndGetObservedState()).thenReturn(DEFAULT_STATE).thenReturn(blockedState);
+        when(observer.isTimedOut()).thenReturn(false, true);
+        doAnswer((i) -> {
+            i.getArgument(0, ClusterStateObserver.Listener.class).onClusterServiceClose();
+            return null;
+        }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any());
+
+        CompletableFuture<BulkResponse> future = new CompletableFuture<>();
+        ActionListener<BulkResponse> listener = ActionListener.wrap(future::complete, future::completeExceptionally);
+
+        newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run();
+
+        expectThrows(ExecutionException.class, NodeClosedException.class, future::get);
+
+        verify(observer, times(1)).isTimedOut();
+        verify(observer, times(1)).waitForNextChange(any());
+    }
+
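// Editor's aside (illustration, not part of the patch): these observer mocks rely on Mockito's
// consecutive stubbing, which is what drives the retry-then-give-up flow in the tests above:
when(observer.isTimedOut()).thenReturn(false, true);            // first check: keep waiting; second: timed out
when(observer.setAndGetObservedState()).thenReturn(DEFAULT_STATE).thenReturn(blockedState); // one state per call
doAnswer(i -> {
    i.getArgument(0, ClusterStateObserver.Listener.class).onClusterServiceClose(); // first wait: signal closure
    return null;
}).doThrow(new AssertionError("Should not wait any longer"))    // any further wait fails the test
    .when(observer).waitForNextChange(any());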
+    /**
+     * Accepts all write operations from the given request object when it is encountered in the mock shard bulk action
+     */
+    private BulkShardResponse acceptAllShardWrites(BulkShardRequest request) {
+        return new BulkShardResponse(
+            request.shardId(),
+            Arrays.stream(request.items()).map(item -> requestToResponse(request.shardId(), item)).toArray(BulkItemResponse[]::new)
+        );
+    }
+
+    /**
+     * Maps an entire shard id to an exception to throw when it is encountered in the mock shard bulk action
+     */
+    private CheckedFunction<BulkShardRequest, BulkShardResponse, Exception> failingShards(Map<ShardId, Supplier<Exception>> shardsToFail) {
+        return (BulkShardRequest request) -> {
+            if (shardsToFail.containsKey(request.shardId())) {
+                throw shardsToFail.get(request.shardId()).get();
+            } else {
+                return acceptAllShardWrites(request);
+            }
+        };
+    }
+
+    /**
+     * Index name / id tuple
+     */
+    private record IndexAndId(String indexName, String id) {}
+
+    /**
+     * Maps a document to an exception to throw when it is encountered in the mock shard bulk action
+     */
+    private CheckedFunction<BulkShardRequest, BulkShardResponse, Exception> thatFailsDocuments(
+        Map<IndexAndId, Supplier<Exception>> documentsToFail
+    ) {
+        return (BulkShardRequest request) -> new BulkShardResponse(request.shardId(), Arrays.stream(request.items()).map(item -> {
+            IndexAndId key = new IndexAndId(request.index(), item.request().id());
+            if (documentsToFail.containsKey(key)) {
+                return requestToFailedResponse(item, documentsToFail.get(key).get());
+            } else {
+                return requestToResponse(request.shardId(), item);
+            }
+        }).toArray(BulkItemResponse[]::new));
+    }
+
+    /**
+     * Create a shard-level result given a bulk item
+     */
+    private static BulkItemResponse requestToResponse(ShardId shardId, BulkItemRequest itemRequest) {
+        return BulkItemResponse.success(itemRequest.id(), itemRequest.request().opType(), switch (itemRequest.request().opType()) {
+            case INDEX, CREATE -> new IndexResponse(shardId, itemRequest.request().id(), 1, 1, 1, true);
+            case UPDATE -> new UpdateResponse(shardId, itemRequest.request().id(), 1, 1, 1, DocWriteResponse.Result.UPDATED);
+            case DELETE -> new DeleteResponse(shardId, itemRequest.request().id(), 1, 1, 1, true);
+        });
+    }
+
+    /**
+     * Create a shard-level failure given a bulk item
+     */
+    private static BulkItemResponse requestToFailedResponse(BulkItemRequest itemRequest, Exception reason) {
+        return BulkItemResponse.failure(
+            itemRequest.id(),
+            itemRequest.request().opType(),
+            new BulkItemResponse.Failure(itemRequest.index(), itemRequest.request().id(), reason)
+        );
+    }
+
+    /**
+     * Create a client that redirects expected actions to the provided function and fails if an unexpected operation happens.
+     * @param onShardAction Called when TransportShardBulkAction is executed.
+     * @return A node client for the test.
+     */
+    private NodeClient getNodeClient(CheckedFunction<BulkShardRequest, BulkShardResponse, Exception> onShardAction) {
+        return new NoOpNodeClient(threadPool) {
+            @Override
+            @SuppressWarnings("unchecked")
+            public <Request extends ActionRequest, Response extends ActionResponse> Task executeLocally(
+                ActionType<Response> action,
+                Request request,
+                ActionListener<Response> listener
+            ) {
+                if (TransportShardBulkAction.TYPE.equals(action)) {
+                    Response response = null;
+                    Exception exception = null;
+                    try {
+                        response = (Response) onShardAction.apply((BulkShardRequest) request);
+                    } catch (Exception responseException) {
+                        exception = responseException;
+                    }
+                    if (response != null) {
+                        listener.onResponse(response);
+                    } else {
+                        listener.onFailure(exception);
+                    }
+                } else {
+                    fail("Unexpected client call to " + action.name());
+                }
+                return null;
+            }
+        };
+    }
+
+    private BulkOperation newBulkOperation(NodeClient client, BulkRequest request, ActionListener<BulkResponse> listener) {
+        return newBulkOperation(
+            DEFAULT_STATE,
+            client,
+            request,
+            new AtomicArray<>(request.numberOfActions()),
+            Map.of(),
+            mockObserver(DEFAULT_STATE),
+            listener,
+            new FailureStoreDocumentConverter()
+        );
+    }
+
+    private BulkOperation newBulkOperation(
+        NodeClient client,
+        BulkRequest request,
+        FailureStoreDocumentConverter failureStoreDocumentConverter,
+        ActionListener<BulkResponse> listener
+    ) {
+        return newBulkOperation(
+            DEFAULT_STATE,
+            client,
+            request,
+            new AtomicArray<>(request.numberOfActions()),
+            Map.of(),
+            mockObserver(DEFAULT_STATE),
+            listener,
+            failureStoreDocumentConverter
+        );
+    }
+
+    private BulkOperation newBulkOperation(
+        NodeClient client,
+        BulkRequest request,
+        ClusterState state,
+        ClusterStateObserver observer,
+        ActionListener<BulkResponse> listener
+    ) {
+        return newBulkOperation(
+            state,
+            client,
+            request,
+            new AtomicArray<>(request.numberOfActions()),
+            Map.of(),
+            observer,
+            listener,
+            new FailureStoreDocumentConverter()
+        );
+    }
+
+    private BulkOperation newBulkOperation(
+        ClusterState state,
+        NodeClient client,
+        BulkRequest request,
+        AtomicArray<BulkItemResponse> existingResponses,
+        Map<String, IndexNotFoundException> indicesThatCanNotBeCreated,
+        ClusterStateObserver observer,
+        ActionListener<BulkResponse> listener,
+        FailureStoreDocumentConverter failureStoreDocumentConverter
+    ) {
+        // Time provision
+        long timeZero = TimeUnit.MILLISECONDS.toNanos(randomMillisUpToYear9999() - TimeUnit.DAYS.toMillis(1));
+        long duration = TimeUnit.SECONDS.toNanos(randomLongBetween(1, 60));
+        long endTime = timeZero + duration;
+
+        // Expressions
+        ThreadContext ctx = threadPool.getThreadContext();
+        IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(ctx, new SystemIndices(List.of()));
+
+        // Mocks
+        final DiscoveryNode mockNode = mock(DiscoveryNode.class);
+        when(mockNode.getId()).thenReturn(randomAlphaOfLength(10));
+        final ClusterService clusterService = mock(ClusterService.class);
+        when(clusterService.state()).thenReturn(state);
+        when(clusterService.localNode()).thenReturn(mockNode);
+
+        return new BulkOperation(
+            null,
+            threadPool,
+            ThreadPool.Names.SAME,
+            clusterService,
+            request,
+            client,
+            existingResponses,
+            indicesThatCanNotBeCreated,
+            indexNameExpressionResolver,
+            () -> endTime,
+            timeZero,
+            listener,
+            observer,
+            failureStoreDocumentConverter
+        );
+    }
+
+    /**
+     * A default mock cluster state observer that simply returns the state
+     */
+    private ClusterStateObserver mockObserver(ClusterState state) {
+        ClusterStateObserver mockObserver = mock(ClusterStateObserver.class);
+        when(mockObserver.setAndGetObservedState()).thenReturn(state);
+        when(mockObserver.isTimedOut()).thenReturn(false);
+        return mockObserver;
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java
similarity index 90%
rename from server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java
rename to server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java
index 962c796e18c2a..67116bd40c2c8 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/FailureStoreDocumentConverterTests.java
@@ -22,7 +22,7 @@
 import static org.hamcrest.CoreMatchers.nullValue;
 import static org.hamcrest.CoreMatchers.startsWith;
 
-public class FailureStoreDocumentTests extends ESTestCase {
+public class FailureStoreDocumentConverterTests extends ESTestCase {
 
     public void testFailureStoreDocumentConverstion() throws Exception {
         IndexRequest source = new IndexRequest("original_index").routing("fake_routing")
@@ -36,7 +36,12 @@ public void testFailureStoreDocumentConverstion() throws Exception {
         String targetIndexName = "rerouted_index";
         long testTime = 1702357200000L; // 2023-12-12T05:00:00.000Z
 
-        IndexRequest convertedRequest = FailureStoreDocument.transformFailedRequest(source, exception, targetIndexName, () -> testTime);
+        IndexRequest convertedRequest = new FailureStoreDocumentConverter().transformFailedRequest(
+            source,
+            exception,
+            targetIndexName,
+            () -> testTime
+        );
 
         // Retargeting write
         assertThat(convertedRequest.id(), is(nullValue()));
@@ -63,7 +68,7 @@ public void testFailureStoreDocumentConverstion() throws Exception {
         );
         assertThat(
             ObjectPath.eval("error.stack_trace", convertedRequest.sourceAsMap()),
-            containsString("at org.elasticsearch.action.bulk.FailureStoreDocumentTests.testFailureStoreDocumentConverstion")
+            containsString("at org.elasticsearch.action.bulk.FailureStoreDocumentConverterTests.testFailureStoreDocumentConverstion")
         );
 
         assertThat(convertedRequest.isWriteToFailureStore(), is(true));
diff --git a/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java b/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java
index 937ac2d26ebb9..b22a30b533dd2 100644
--- a/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/delete/DeleteResponseTests.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.action.delete;
 
-import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.bulk.BulkItemResponseTests;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -119,7 +119,7 @@ private static DeleteResponse parseInstance(XContentParser parser) throws IOException {
         DeleteResponse.Builder context = new DeleteResponse.Builder();
         while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
-            DocWriteResponse.parseInnerToXContent(parser, context);
+            BulkItemResponseTests.parseInnerToXContent(parser, context);
         }
         return context.build();
     }
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java
index ffdc7b9ca7652..478012567c1ae 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java
@@ -14,10 +14,10 @@
 import org.elasticsearch.index.mapper.MapperServiceTestCase;
 import org.elasticsearch.index.query.SearchExecutionContext;
 import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.plugins.FieldPredicate;
 
 import java.io.IOException;
 import java.util.Map;
-import java.util.function.Predicate;
 
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -46,7 +46,7 @@ public void testExcludeNestedFields() throws IOException {
             s -> true,
             new String[] { "-nested" },
             Strings.EMPTY_ARRAY,
-            f -> true,
+            FieldPredicate.ACCEPT_ALL,
             getMockIndexShard(),
             true
         );
@@ -74,7 +74,7 @@ public void testMetadataFilters() throws IOException {
             s -> true,
             new String[] { "+metadata" },
             Strings.EMPTY_ARRAY,
-            f -> true,
+            FieldPredicate.ACCEPT_ALL,
             getMockIndexShard(),
             true
         );
@@ -87,7 +87,7 @@
             s -> true,
             new String[] { "-metadata" },
             Strings.EMPTY_ARRAY,
-            f -> true,
+            FieldPredicate.ACCEPT_ALL,
             getMockIndexShard(),
             true
         );
@@ -120,7 +120,7 @@ public void testExcludeMultifields() throws IOException {
             s -> true,
             new String[] { "-multifield" },
             Strings.EMPTY_ARRAY,
-            f -> true,
+            FieldPredicate.ACCEPT_ALL,
             getMockIndexShard(),
             true
         );
@@ -151,7 +151,7 @@ public void testDontIncludeParentInfo() throws IOException {
             s -> true,
             new String[] { "-parent" },
             Strings.EMPTY_ARRAY,
-            f -> true,
+            FieldPredicate.ACCEPT_ALL,
             getMockIndexShard(),
             true
         );
@@ -171,7 +171,22 @@ public void testSecurityFilter() throws IOException {
                 }
             }
             """);
         SearchExecutionContext sec = createSearchExecutionContext(mapperService);
-        Predicate<String> securityFilter = f -> f.startsWith("permitted");
+        FieldPredicate securityFilter = new FieldPredicate() {
+            @Override
+            public boolean test(String field) {
+                return field.startsWith("permitted");
+            }
+
+            @Override
+            public String modifyHash(String hash) {
+                return "only-permitted:" + hash;
+            }
+
+            @Override
+            public long ramBytesUsed() {
+                return 0;
+            }
+        };
 
         {
             Map<String, IndexFieldCapabilities> response = FieldCapabilitiesFetcher.retrieveFieldCaps(
@@ -223,7 +238,7 @@ public void testFieldTypeFiltering() throws IOException {
             s -> true,
             Strings.EMPTY_ARRAY,
             new String[] { "text", "keyword" },
-            f -> true,
+            FieldPredicate.ACCEPT_ALL,
             getMockIndexShard(),
             true
         );
diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java
index c8a8c3853601d..878c35b449366 100644
--- a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.index;
 
 import org.elasticsearch.action.DocWriteResponse;
+import
parseInstance(XContentParser parser) throws IOExce DeleteResponse.Builder context = new DeleteResponse.Builder(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - DocWriteResponse.parseInnerToXContent(parser, context); + BulkItemResponseTests.parseInnerToXContent(parser, context); } return context.build(); } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java index ffdc7b9ca7652..478012567c1ae 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFilterTests.java @@ -14,10 +14,10 @@ import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.plugins.FieldPredicate; import java.io.IOException; import java.util.Map; -import java.util.function.Predicate; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -46,7 +46,7 @@ public void testExcludeNestedFields() throws IOException { s -> true, new String[] { "-nested" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -74,7 +74,7 @@ public void testMetadataFilters() throws IOException { s -> true, new String[] { "+metadata" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -87,7 +87,7 @@ public void testMetadataFilters() throws IOException { s -> true, new String[] { "-metadata" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -120,7 +120,7 @@ public void testExcludeMultifields() throws IOException { s -> true, new String[] { "-multifield" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -151,7 +151,7 @@ public void testDontIncludeParentInfo() throws IOException { s -> true, new String[] { "-parent" }, Strings.EMPTY_ARRAY, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); @@ -171,7 +171,22 @@ public void testSecurityFilter() throws IOException { } } """); SearchExecutionContext sec = createSearchExecutionContext(mapperService); - Predicate securityFilter = f -> f.startsWith("permitted"); + FieldPredicate securityFilter = new FieldPredicate() { + @Override + public boolean test(String field) { + return field.startsWith("permitted"); + } + + @Override + public String modifyHash(String hash) { + return "only-permitted:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; { Map response = FieldCapabilitiesFetcher.retrieveFieldCaps( @@ -223,7 +238,7 @@ public void testFieldTypeFiltering() throws IOException { s -> true, Strings.EMPTY_ARRAY, new String[] { "text", "keyword" }, - f -> true, + FieldPredicate.ACCEPT_ALL, getMockIndexShard(), true ); diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java index c8a8c3853601d..878c35b449366 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.DocWriteResponse; +import 
org.elasticsearch.action.bulk.BulkItemResponseTests; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -126,7 +127,7 @@ private static IndexResponse parseInstanceFromXContent(XContentParser parser) th ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); IndexResponse.Builder context = new IndexResponse.Builder(); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - DocWriteResponse.parseInnerToXContent(parser, context); + BulkItemResponseTests.parseInnerToXContent(parser, context); } return context.build(); } diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java index d35162287e3ac..0eefeb87d3e02 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.update; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponseTests; import org.elasticsearch.action.index.IndexResponseTests; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.common.Strings; @@ -214,7 +215,7 @@ public static void parseXContentFields(XContentParser parser, UpdateResponse.Bui context.setGetResult(GetResult.fromXContentEmbedded(parser)); } } else { - DocWriteResponse.parseInnerToXContent(parser, context); + BulkItemResponseTests.parseInnerToXContent(parser, context); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java index 637a18547b1b2..48d28462231a0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java @@ -12,8 +12,12 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTableGenerator; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -21,12 +25,16 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Predicate; import java.util.regex.Pattern; +import static java.util.Collections.emptyMap; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.hamcrest.CoreMatchers.equalTo; public class ClusterIndexHealthTests extends AbstractXContentSerializingTestCase { @@ -106,7 +114,7 @@ protected ClusterIndexHealth doParseInstance(XContentParser parser) throws IOExc XContentParser.Token token = parser.nextToken(); 
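Several of the surrounding hunks replace `Predicate<String>`-based field filters with the new `FieldPredicate` interface, which couples the visibility test with a cache-key modifier and memory accounting. A minimal sketch of a custom implementation, mirroring the anonymous classes in this diff; the `metrics.` prefix rule is a hypothetical example:

```java
// Hypothetical filter: only fields under "metrics." are visible.
FieldPredicate onlyMetrics = new FieldPredicate() {
    @Override
    public boolean test(String field) {
        return field.startsWith("metrics.");
    }

    @Override
    public String modifyHash(String hash) {
        // Distinguish cached field-caps entries computed under this filter.
        return "only-metrics:" + hash;
    }

    @Override
    public long ramBytesUsed() {
        return 0; // stateless predicate, nothing to account for
    }
};
```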
         ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
         String index = parser.currentName();
-        ClusterIndexHealth parsed = ClusterIndexHealth.innerFromXContent(parser, index);
+        ClusterIndexHealth parsed = parseInstance(parser, index);
         ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
         return parsed;
     }
@@ -288,4 +296,66 @@ protected ClusterIndexHealth mutateInstance(ClusterIndexHealth instance) {
             throw new UnsupportedOperationException();
         }
     }
+
+    private static final ConstructingObjectParser<ClusterIndexHealth, String> PARSER = new ConstructingObjectParser<>(
+        "cluster_index_health",
+        true,
+        (parsedObjects, index) -> {
+            int i = 0;
+            int numberOfShards = (int) parsedObjects[i++];
+            int numberOfReplicas = (int) parsedObjects[i++];
+            int activeShards = (int) parsedObjects[i++];
+            int relocatingShards = (int) parsedObjects[i++];
+            int initializingShards = (int) parsedObjects[i++];
+            int unassignedShards = (int) parsedObjects[i++];
+            int activePrimaryShards = (int) parsedObjects[i++];
+            String statusStr = (String) parsedObjects[i++];
+            ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr);
+            @SuppressWarnings("unchecked")
+            List<ClusterShardHealth> shardList = (List<ClusterShardHealth>) parsedObjects[i];
+            final Map<Integer, ClusterShardHealth> shards;
+            if (shardList == null || shardList.isEmpty()) {
+                shards = emptyMap();
+            } else {
+                shards = Maps.newMapWithExpectedSize(shardList.size());
+                for (ClusterShardHealth shardHealth : shardList) {
+                    shards.put(shardHealth.getShardId(), shardHealth);
+                }
+            }
+            return new ClusterIndexHealth(
+                index,
+                numberOfShards,
+                numberOfReplicas,
+                activeShards,
+                relocatingShards,
+                initializingShards,
+                unassignedShards,
+                activePrimaryShards,
+                status,
+                shards
+            );
+        }
+    );
+
+    public static final ObjectParser.NamedObjectParser<ClusterShardHealth, String> SHARD_PARSER = (
+        XContentParser p,
+        String indexIgnored,
+        String shardId) -> ClusterShardHealthTests.PARSER.apply(p, Integer.valueOf(shardId));
+
+    static {
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.NUMBER_OF_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.NUMBER_OF_REPLICAS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.ACTIVE_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.RELOCATING_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.INITIALIZING_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.UNASSIGNED_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterIndexHealth.ACTIVE_PRIMARY_SHARDS));
+        PARSER.declareString(constructorArg(), new ParseField(ClusterIndexHealth.STATUS));
+        // Can be absent if LEVEL == 'indices' or 'cluster'
+        PARSER.declareNamedObjects(optionalConstructorArg(), SHARD_PARSER, new ParseField(ClusterIndexHealth.SHARDS));
+    }
+
+    public static ClusterIndexHealth parseInstance(XContentParser parser, String index) {
+        return PARSER.apply(parser, index);
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java
index ce7c366ff30e6..1e1eacba183d2 100644
--- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterShardHealthTests.java
@@ -9,17 +9,61 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.function.Predicate;
 
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
 public class ClusterShardHealthTests extends AbstractXContentSerializingTestCase<ClusterShardHealth> {
 
+    public static final ConstructingObjectParser<ClusterShardHealth, Integer> PARSER = new ConstructingObjectParser<>(
+        "cluster_shard_health",
+        true,
+        (parsedObjects, shardId) -> {
+            int i = 0;
+            boolean primaryActive = (boolean) parsedObjects[i++];
+            int activeShards = (int) parsedObjects[i++];
+            int relocatingShards = (int) parsedObjects[i++];
+            int initializingShards = (int) parsedObjects[i++];
+            int unassignedShards = (int) parsedObjects[i++];
+            String statusStr = (String) parsedObjects[i];
+            ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr);
+            return new ClusterShardHealth(
+                shardId,
+                status,
+                activeShards,
+                relocatingShards,
+                initializingShards,
+                unassignedShards,
+                primaryActive
+            );
+        }
+    );
+
+    static {
+        PARSER.declareBoolean(constructorArg(), new ParseField(ClusterShardHealth.PRIMARY_ACTIVE));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.ACTIVE_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.RELOCATING_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.INITIALIZING_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterShardHealth.UNASSIGNED_SHARDS));
+        PARSER.declareString(constructorArg(), new ParseField(ClusterShardHealth.STATUS));
+    }
+
     @Override
     protected ClusterShardHealth doParseInstance(XContentParser parser) throws IOException {
-        return ClusterShardHealth.fromXContent(parser);
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
+        XContentParser.Token token = parser.nextToken();
+        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
+        String shardIdStr = parser.currentName();
+        ClusterShardHealth parsed = PARSER.apply(parser, Integer.valueOf(shardIdStr));
+        ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
+        return parsed;
+    }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java
index 1e35a40dedc17..955d7d2de6882 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java
@@ -42,6 +42,7 @@
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.ingest.IngestMetadata;
 import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.plugins.FieldPredicate;
 import org.elasticsearch.plugins.MapperPlugin;
 import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
 import org.elasticsearch.test.ESTestCase;
@@ -786,7 +787,7 @@ public void testFindMappingsWithFilters() throws IOException {
             if (index.equals("index2")) {
                 return Predicates.never();
             }
-            return MapperPlugin.NOOP_FIELD_PREDICATE;
+            return FieldPredicate.ACCEPT_ALL;
         }, Metadata.ON_NEXT_INDEX_FIND_MAPPINGS_NOOP);
 
         assertIndexMappingsNoFields(mappings, "index2");
diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java
b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index 351efa59f2381..67f74df78e256 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -9,12 +9,14 @@ package org.elasticsearch.common.collect; import org.elasticsearch.common.Randomness; +import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.NoSuchElementException; import java.util.Objects; @@ -242,6 +244,29 @@ public Integer next() { assertEquals(array.length, index.get()); } + public void testEnumerate() { + assertEmptyIterator(Iterators.enumerate(Iterators.concat(), Tuple::new)); + + final var array = randomIntegerArray(); + final var index = new AtomicInteger(); + Iterators.enumerate(Iterators.forArray(array), Tuple::new).forEachRemaining(t -> { + int idx = index.getAndIncrement(); + assertEquals(idx, t.v1().intValue()); + assertEquals(array[idx], t.v2()); + }); + assertEquals(array.length, index.get()); + } + + public void testSupplier() { + assertEmptyIterator(Iterators.fromSupplier(() -> null)); + + final var array = randomIntegerArray(); + final var index = new AtomicInteger(); + final var queue = new LinkedList<>(Arrays.asList(array)); + Iterators.fromSupplier(queue::pollFirst).forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + } + public void testEquals() { final BiPredicate notCalled = (a, b) -> { throw new AssertionError("not called"); }; diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 43628fe59daa3..683bfb19aac26 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -90,9 +90,11 @@ public > IFD getForField(String type, String field if (docValues) { fieldType = new KeywordFieldMapper.Builder(fieldName, IndexVersion.current()).build(context).fieldType(); } else { - fieldType = new TextFieldMapper.Builder(fieldName, createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); + fieldType = new TextFieldMapper.Builder( + fieldName, + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); } } else if (type.equals("float")) { fieldType = new NumberFieldMapper.Builder( diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java index 45ebfba265c2f..4df1961c123af 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTests.java @@ -52,10 +52,11 @@ public void testFilterByFrequency() throws Exception { { indexService.clearCaches(false, true); - MappedFieldType ft = new TextFieldMapper.Builder("high_freq", createDefaultIndexAnalyzers()).fielddata(true) - .fielddataFrequencyFilter(0, random.nextBoolean() ? 
100 : 0.5d, 0) - .build(builderContext) - .fieldType(); + MappedFieldType ft = new TextFieldMapper.Builder( + "high_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).fielddataFrequencyFilter(0, random.nextBoolean() ? 100 : 0.5d, 0).build(builderContext).fieldType(); IndexOrdinalsFieldData fieldData = searchExecutionContext.getForField(ft, MappedFieldType.FielddataOperation.SEARCH); for (LeafReaderContext context : contexts) { LeafOrdinalsFieldData loadDirect = fieldData.loadDirect(context); @@ -67,7 +68,11 @@ public void testFilterByFrequency() throws Exception { } { indexService.clearCaches(false, true); - MappedFieldType ft = new TextFieldMapper.Builder("high_freq", createDefaultIndexAnalyzers()).fielddata(true) + MappedFieldType ft = new TextFieldMapper.Builder( + "high_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true) .fielddataFrequencyFilter(random.nextBoolean() ? 101 : 101d / 200.0d, 201, 100) .build(builderContext) .fieldType(); @@ -82,7 +87,11 @@ public void testFilterByFrequency() throws Exception { { indexService.clearCaches(false, true);// test # docs with value - MappedFieldType ft = new TextFieldMapper.Builder("med_freq", createDefaultIndexAnalyzers()).fielddata(true) + MappedFieldType ft = new TextFieldMapper.Builder( + "med_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true) .fielddataFrequencyFilter(random.nextBoolean() ? 101 : 101d / 200.0d, Integer.MAX_VALUE, 101) .build(builderContext) .fieldType(); @@ -98,7 +107,11 @@ public void testFilterByFrequency() throws Exception { { indexService.clearCaches(false, true); - MappedFieldType ft = new TextFieldMapper.Builder("med_freq", createDefaultIndexAnalyzers()).fielddata(true) + MappedFieldType ft = new TextFieldMapper.Builder( + "med_freq", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true) .fielddataFrequencyFilter(random.nextBoolean() ? 
101 : 101d / 200.0d, Integer.MAX_VALUE, 101) .build(builderContext) .fieldType(); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index bf9176de1b124..8c583fe3976fa 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -156,12 +156,16 @@ public void testClearField() throws Exception { ); final MapperBuilderContext context = MapperBuilderContext.root(false, false); - final MappedFieldType mapper1 = new TextFieldMapper.Builder("field_1", createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); - final MappedFieldType mapper2 = new TextFieldMapper.Builder("field_2", createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); + final MappedFieldType mapper1 = new TextFieldMapper.Builder( + "field_1", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); + final MappedFieldType mapper2 = new TextFieldMapper.Builder( + "field_2", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); final IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer())); Document doc = new Document(); doc.add(new StringField("field_1", "thisisastring", Store.NO)); @@ -223,9 +227,11 @@ public void testFieldDataCacheListener() throws Exception { ); final MapperBuilderContext context = MapperBuilderContext.root(false, false); - final MappedFieldType mapper1 = new TextFieldMapper.Builder("s", createDefaultIndexAnalyzers()).fielddata(true) - .build(context) - .fieldType(); + final MappedFieldType mapper1 = new TextFieldMapper.Builder( + "s", + createDefaultIndexAnalyzers(), + indexService.getIndexSettings().getMode().isSyntheticSourceEnabled() + ).fielddata(true).build(context).fieldType(); final IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer())); Document doc = new Document(); doc.add(new StringField("s", "thisisastring", Store.NO)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index d55eaf9df3452..0cdc9568f1fac 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -55,6 +55,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -73,8 +74,8 @@ protected ScriptFactory dummyScript() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + 
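The field-data test changes above all thread one new argument through `TextFieldMapper.Builder`: whether the index mode has synthetic source enabled. The call shape, extracted from those hunks into a standalone sketch ("my_field" is a hypothetical field name):

```java
// The Builder now takes the synthetic-source flag as its third argument,
// read here from the index settings exactly as the hunks above do.
boolean syntheticSource = indexService.getIndexSettings().getMode().isSyntheticSourceEnabled();
MappedFieldType ft = new TextFieldMapper.Builder("my_field", createDefaultIndexAnalyzers(), syntheticSource)
    .fielddata(true)
    .build(MapperBuilderContext.root(false, false))
    .fieldType();
```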
addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -104,7 +105,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(1L, 0L, 1L))); + assertThat(results, containsInAnyOrder(1L, 0L, 1L)); } } } @@ -112,8 +113,8 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); BooleanScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -128,8 +129,8 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); { @@ -185,10 +186,10 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true, false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(3)); @@ -199,7 +200,7 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = 
iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -210,7 +211,7 @@ public void testRangeQuery() throws IOException { } } try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -221,8 +222,8 @@ public void testRangeQuery() throws IOException { } } try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -269,7 +270,7 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery(true, mockContext())), equalTo(1)); @@ -282,7 +283,7 @@ public void testTermQuery() throws IOException { } } try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery(false, mockContext())), equalTo(1)); @@ -305,7 +306,7 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [true]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of(true, true), mockContext())), equalTo(1)); @@ -315,7 +316,7 @@ public void testTermsQuery() throws IOException { } } try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [false]}")))); try 
(DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of(false, false), mockContext())), equalTo(1)); @@ -364,7 +365,7 @@ public XContentParser parser() { while (ctx.parser().nextToken() != Token.END_ARRAY) { ootb.parse(ctx); } - iw.addDocument(ctx.doc()); + addDocument(iw, ctx.doc()); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertSameCount( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index 25a79022c245e..09d4b62fb157c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -60,6 +60,7 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -160,8 +161,8 @@ public void testFormatDuel() throws IOException { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356, 1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356, 1595432181351]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -191,7 +192,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(1595518581354L, 1595518581351L, 1595518581356L))); + assertThat(results, containsInAnyOrder(1595518581354L, 1595518581351L, 1595518581356L)); } } } @@ -199,9 +200,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); DateScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -220,9 +221,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try 
(Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -300,8 +301,8 @@ private Query randomDistanceFeatureQuery(MappedFieldType ft, SearchExecutionCont @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -312,9 +313,9 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -394,8 +395,8 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher 
= newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery("2020-07-22T15:36:21.354Z", mockContext())), equalTo(1)); @@ -422,8 +423,8 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181355]}")))); try (DirectoryReader reader = iw.getReader()) { MappedFieldType ft = simpleMappedFieldType(); IndexSearcher searcher = newSearcher(reader); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java index 03716f8ad4497..9b66d0011ba69 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserContextTests.java @@ -20,9 +20,9 @@ public class DocumentParserContextTests extends ESTestCase { private final MapperBuilderContext root = MapperBuilderContext.root(false, false); public void testDynamicMapperSizeMultipleMappers() { - context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(1, context.getNewFieldsSize()); - context.addDynamicMapper(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(2, context.getNewFieldsSize()); context.addDynamicRuntimeField(new TestRuntimeField("runtime1", "keyword")); assertEquals(3, context.getNewFieldsSize()); @@ -37,9 +37,9 @@ public void testDynamicMapperSizeSameFieldMultipleRuntimeFields() { } public void testDynamicMapperSizeSameFieldMultipleMappers() { - context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(1, context.getNewFieldsSize()); - context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers()).build(root)); + context.addDynamicMapper(new TextFieldMapper.Builder("foo", createDefaultIndexAnalyzers(), false).build(root)); assertEquals(1, context.getNewFieldsSize()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java index ed365a2460203..9547b4f9cb9a3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java @@ -45,6 +45,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; public class 
DoubleScriptFieldTypeTests extends AbstractNonTextScriptFieldTypeTestCase { @@ -71,8 +72,8 @@ public void testFormat() throws IOException { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.0]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [3.14, 1.4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.0]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [3.14, 1.4]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -102,7 +103,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(2.0, 2.4, 4.140000000000001))); + assertThat(results, containsInAnyOrder(2.0, 2.4, 4.140000000000001)); } } } @@ -110,9 +111,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); DoubleScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -128,9 +129,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4.2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -158,8 +159,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": 
[]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -170,9 +171,9 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.5]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.5]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -195,8 +196,8 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery("1", mockContext())), equalTo(1)); @@ -218,8 +219,8 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2.1]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of("1"), mockContext())), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java index 2b8be2882c409..ce406b604ba62 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -32,7 
+33,6 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Predicate; import static org.elasticsearch.cluster.metadata.MetadataTests.assertLeafs; import static org.elasticsearch.cluster.metadata.MetadataTests.assertMultiField; @@ -246,8 +246,23 @@ private static void assertNotFiltered(MappingMetadata mappingMetadata) { public static class FieldFilterPlugin extends Plugin implements MapperPlugin { @Override - public Function> getFieldFilter() { - return index -> index.equals("filtered") ? field -> field.endsWith("visible") : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> false == index.equals("filtered") ? FieldPredicate.ACCEPT_ALL : new FieldPredicate() { + @Override + public boolean test(String field) { + return field.endsWith("visible"); + } + + @Override + public String modifyHash(String hash) { + return "only-visible:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java index 36f691341425c..3289e46941a45 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java @@ -44,6 +44,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; public class GeoPointScriptFieldTypeTests extends AbstractNonTextScriptFieldTypeTestCase { @@ -71,8 +72,8 @@ protected boolean supportsRangeQueries() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -103,7 +104,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(new GeoPoint(45.0, 45.0), new GeoPoint(0.0, 0.0)))); + assertThat(results, containsInAnyOrder(new GeoPoint(45.0, 45.0), new GeoPoint(0.0, 0.0))); } } } @@ -117,7 +118,7 @@ public void testSort() throws IOException { public void testFetch() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef(""" + addDocument(iw, List.of(new StoredField("_source", new BytesRef(""" {"foo": {"lat": 45.0, "lon" : 45.0}}""")))); try (DirectoryReader reader = iw.getReader()) { SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -138,8 +139,8 @@ public void testFetch() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new 
RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -167,8 +168,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 45.0, \"lon\" : 45.0}}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": {\"lat\": 0.0, \"lon\" : 0.0}}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index 5eb66e631d86f..4726424ada5f2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -49,6 +49,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; @@ -75,8 +76,8 @@ public void testFormat() throws IOException { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.2\", \"192.168.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.2\", \"192.168.1\"]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -107,7 +108,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of("192.168.0.1", "192.168.1.1", "192.168.2.1"))); + assertThat(results, containsInAnyOrder("192.168.0.1", "192.168.1.1", "192.168.2.1")); } } } @@ -115,9 +116,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new 
BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); BinaryScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -142,9 +143,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.4\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.2\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -172,8 +173,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -184,9 +185,9 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -207,9 
+208,9 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); IpScriptFieldType fieldType = build("append_param", Map.of("param", ".1"), OnScriptError.FAIL); @@ -229,10 +230,10 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"192.168.1.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"200.0.0.1\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"1.1.1.1\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java index d8903251e6c3b..6912194625bb7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java @@ -49,6 +49,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; public class KeywordScriptFieldTypeTests extends AbstractScriptFieldTypeTestCase { @@ -66,8 +67,8 @@ protected ScriptFactory dummyScript() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = 
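A note on the assertion rewrites that run through these script-field test files: once documents are added through a helper that may flush between documents, they can land in separate segments, and with inter-segment concurrency the collector no longer visits them in insertion order. containsInAnyOrder keeps the exact multiset check while dropping the ordering guarantee. A minimal, self-contained illustration of the matcher semantics (values hypothetical):

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.containsInAnyOrder;

    import java.util.List;

    class MatcherSemanticsSketch {
        static void demo() {
            // Order of collection now depends on which segment is visited first.
            List<Long> collected = List.of(3L, 2L, 2L);
            // Passes: same elements with the same multiplicities, in any order.
            assertThat(collected, containsInAnyOrder(2L, 2L, 3L));
            // equalTo(List.of(2L, 2L, 3L)) would fail for this ordering.
        }
    }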
iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -97,7 +98,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of("1-suffix", "1-suffix", "2-suffix"))); + assertThat(results, containsInAnyOrder("1-suffix", "1-suffix", "2-suffix")); } } } @@ -105,9 +106,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"d\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"d\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); BinaryScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -123,9 +124,9 @@ public void testSort() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aaa\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aa\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"a\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aaa\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aa\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -153,8 +154,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -164,11 +165,11 @@ public void testExistsQuery() throws IOException { public void testFuzzyQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); // No edits, matches - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caat\"]}")))); // Single insertion, matches - iw.addDocument(List.of(new 
StoredField("_source", new BytesRef("{\"foo\": [\"cta\"]}")))); // Single transposition, matches - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caaat\"]}")))); // Two insertions, no match - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); // Totally wrong, no match + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); // No edits, matches + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caat\"]}")))); // Single insertion, matches + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cta\"]}")))); // Single transposition, matches + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"caaat\"]}")))); // Two insertions, no match + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); // Totally wrong, no match try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -200,9 +201,9 @@ private Query randomFuzzyQuery(MappedFieldType ft, SearchExecutionContext ctx) { public void testPrefixQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().prefixQuery("cat", null, mockContext())), equalTo(2)); @@ -225,9 +226,9 @@ private Query randomPrefixQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -268,9 +269,9 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) public void testRegexpQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": 
[\"dog\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cat\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"cata\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"dog\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat( @@ -294,8 +295,8 @@ private Query randomRegexpQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); KeywordScriptFieldType fieldType = build("append_param", Map.of("param", "-suffix"), OnScriptError.FAIL); @@ -312,10 +313,10 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [3]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [3]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of("1", "2"), mockContext())), equalTo(2)); @@ -330,8 +331,8 @@ protected Query randomTermsQuery(MappedFieldType ft, SearchExecutionContext ctx) public void testWildcardQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().wildcardQuery("a*b", null, mockContext())), equalTo(1)); @@ -342,8 +343,8 @@ public void testWildcardQuery() throws IOException { // Normalized WildcardQueries are requested by the QueryStringQueryParser public void testNormalizedWildcardQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), 
directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"aab\"]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [\"b\"]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().normalizedWildcardQuery("a*b", null, mockContext())), equalTo(1)); @@ -365,8 +366,8 @@ private Query randomWildcardQuery(MappedFieldType ft, SearchExecutionContext ctx public void testMatchQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); KeywordScriptFieldType fieldType = build("append_param", Map.of("param", "-Suffix"), OnScriptError.FAIL); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java index debcd3c5fa911..83b3dbe858471 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java @@ -47,6 +47,7 @@ import java.util.Map; import static java.util.Collections.emptyMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -83,8 +84,8 @@ public void testLongFromSource() throws IOException { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2, 1]}")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -114,7 +115,7 @@ public void collect(int doc) throws IOException { }; } }); - assertThat(results, equalTo(List.of(2L, 2L, 3L))); + assertThat(results, containsInAnyOrder(2L, 2L, 3L)); } } } @@ -122,9 +123,9 @@ public void collect(int doc) throws IOException { @Override public void testSort() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + 
addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); LongScriptFieldData ifd = simpleMappedFieldType().fielddataBuilder(mockFielddataContext()).build(null, null); @@ -139,9 +140,9 @@ public void testSort() throws IOException { public void testNow() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181354]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181351]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"timestamp\": [1595432181356]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); LongScriptFieldData ifd = build("millis_ago", Map.of(), OnScriptError.FAIL).fielddataBuilder(mockFielddataContext()) @@ -164,9 +165,9 @@ public void testNow() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [4]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -194,8 +195,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": []}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(1)); @@ -206,8 +207,8 @@ public void testExistsQuery() throws IOException { @Override public void testRangeQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new 
BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); MappedFieldType ft = simpleMappedFieldType(); @@ -228,8 +229,8 @@ protected Query randomRangeQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termQuery("1", mockContext())), equalTo(1)); @@ -251,8 +252,8 @@ protected Query randomTermQuery(MappedFieldType ft, SearchExecutionContext ctx) @Override public void testTermsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [1]}")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": [2]}")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().termsQuery(List.of("1"), mockContext())), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java new file mode 100644 index 0000000000000..01cbe496e6a3d --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.script.ScriptCompiler; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; + +public class MultiFieldsTests extends ESTestCase { + + public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordField() { + var isStored = randomBoolean(); + var hasNormalizer = randomBoolean(); + + var builder = new FieldMapper.MultiFields.Builder(); + assertFalse(builder.hasSyntheticSourceCompatibleKeywordField()); + + var keywordFieldMapperBuilder = getKeywordFieldMapperBuilder(isStored, hasNormalizer); + builder.add(keywordFieldMapperBuilder); + + var expected = hasNormalizer == false; + assertEquals(expected, builder.hasSyntheticSourceCompatibleKeywordField()); + } + + public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordFieldDuringMerge() { + var isStored = randomBoolean(); + var hasNormalizer = randomBoolean(); + + var builder = new TextFieldMapper.Builder("text_field", createDefaultIndexAnalyzers(), false); + assertFalse(builder.multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField()); + + var keywordFieldMapperBuilder = getKeywordFieldMapperBuilder(isStored, hasNormalizer); + + var newField = new TextFieldMapper.Builder("text_field", createDefaultIndexAnalyzers(), false).addMultiField( + keywordFieldMapperBuilder + ).build(MapperBuilderContext.root(false, false)); + + builder.merge(newField, new FieldMapper.Conflicts("TextFieldMapper"), MapperMergeContext.root(false, false, Long.MAX_VALUE)); + + var expected = hasNormalizer == false; + assertEquals(expected, builder.multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField()); + } + + private KeywordFieldMapper.Builder getKeywordFieldMapperBuilder(boolean isStored, boolean hasNormalizer) { + var keywordFieldMapperBuilder = new KeywordFieldMapper.Builder( + "field", + IndexAnalyzers.of(Map.of(), Map.of("normalizer", Lucene.STANDARD_ANALYZER), Map.of()), + ScriptCompiler.NONE, + IndexVersion.current() + ); + if (isStored) { + keywordFieldMapperBuilder.stored(true); + if (randomBoolean()) { + keywordFieldMapperBuilder.docValues(false); + } + } + if (hasNormalizer) { + keywordFieldMapperBuilder.normalizer("normalizer"); + } + return keywordFieldMapperBuilder; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index e024f2fa7b1ea..3c4aca4d36284 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -27,10 +27,10 @@ private RootObjectMapper createMapping( rootBuilder.add(new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE).enabled(disabledFieldEnabled)); ObjectMapper.Builder fooBuilder = new ObjectMapper.Builder("foo", Explicit.IMPLICIT_TRUE).enabled(fooFieldEnabled); if (includeBarField) { - fooBuilder.add(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers())); + fooBuilder.add(new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false)); } if (includeBazField) { - fooBuilder.add(new TextFieldMapper.Builder("baz", createDefaultIndexAnalyzers())); + fooBuilder.add(new TextFieldMapper.Builder("baz", createDefaultIndexAnalyzers(), false)); } rootBuilder.add(fooBuilder); return 
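The new MultiFieldsTests above pins down a simple invariant: a keyword multi-field counts as synthetic-source compatible only when it has no normalizer; whether the value is kept stored or as doc values is irrelevant (the test randomizes isStored and the expectation ignores it). Distilled as a hypothetical helper (name and shape are mine, the logic is copied from the assertions):

    // Expectation encoded by both tests: compatibility == no normalizer.
    static boolean expectCompatibleKeywordMultiField(boolean hasNormalizer) {
        return hasNormalizer == false;
    }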
rootBuilder.build(MapperBuilderContext.root(false, false)); @@ -366,7 +366,7 @@ private TextFieldMapper.Builder createTextKeywordMultiField(String name) { } private TextFieldMapper.Builder createTextKeywordMultiField(String name, String multiFieldName) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers(), false); builder.multiFieldsBuilder.add(new KeywordFieldMapper.Builder(multiFieldName, IndexVersion.current())); return builder; } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 6472f09ce1be7..74b293ca7d6d6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -530,11 +530,11 @@ public void testSyntheticSourceDocValuesFieldWithout() throws IOException { public void testNestedObjectWithMultiFieldsgetTotalFieldsCount() { ObjectMapper.Builder mapperBuilder = new ObjectMapper.Builder("parent_size_1", Explicit.IMPLICIT_TRUE).add( new ObjectMapper.Builder("child_size_2", Explicit.IMPLICIT_TRUE).add( - new TextFieldMapper.Builder("grand_child_size_3", createDefaultIndexAnalyzers()).addMultiField( + new TextFieldMapper.Builder("grand_child_size_3", createDefaultIndexAnalyzers(), false).addMultiField( new KeywordFieldMapper.Builder("multi_field_size_4", IndexVersion.current()) ) .addMultiField( - new TextFieldMapper.Builder("grand_child_size_5", createDefaultIndexAnalyzers()).addMultiField( + new TextFieldMapper.Builder("grand_child_size_5", createDefaultIndexAnalyzers(), false).addMultiField( new KeywordFieldMapper.Builder("multi_field_of_multi_field_size_6", IndexVersion.current()) ) ) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java index 8cb3ecef4c35c..def8841045746 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.AnalysisMode; @@ -67,6 +68,9 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { fieldNode.put("analyzer", "my_analyzer"); MappingParserContext parserContext = mock(MappingParserContext.class); when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); + when(parserContext.getIndexSettings()).thenReturn( + new IndexSettings(IndexMetadata.builder("index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(), Settings.EMPTY) + ); // check AnalysisMode.ALL works Map analyzers = defaultAnalyzers(); @@ -102,6 +106,12 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { } MappingParserContext parserContext = mock(MappingParserContext.class); when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); + when(parserContext.getIndexSettings()).thenReturn( + new IndexSettings( + 
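A pattern that recurs from here on: TextFieldMapper.Builder takes a third constructor argument saying whether synthetic source is enabled via the index mode, so every construction site now passes a flag, and mocked MappingParserContext instances must stub getIndexSettings() as the TextFieldAnalyzerModeTests hunks do. The two call shapes seen in this diff, shown side by side (a sketch that assumes the surrounding test scaffolding such as createDefaultIndexAnalyzers and idxSettings):

    // Plain unit tests, where no index mode is in play, pass false:
    TextFieldMapper.Builder plain = new TextFieldMapper.Builder("bar", createDefaultIndexAnalyzers(), false);

    // Contexts holding IndexSettings derive the flag from the index mode:
    TextFieldMapper.Builder modeAware = new TextFieldMapper.Builder(
        name,
        createDefaultIndexAnalyzers(),
        idxSettings.getMode().isSyntheticSourceEnabled()
    );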
IndexMetadata.builder("index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(), + Settings.EMPTY + ) + ); // check AnalysisMode.ALL and AnalysisMode.SEARCH_TIME works Map analyzers = defaultAnalyzers(); @@ -143,6 +153,9 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { fieldNode.put("analyzer", "my_analyzer"); MappingParserContext parserContext = mock(MappingParserContext.class); when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); + when(parserContext.getIndexSettings()).thenReturn( + new IndexSettings(IndexMetadata.builder("index").settings(indexSettings(IndexVersion.current(), 1, 0)).build(), Settings.EMPTY) + ); // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked if there is no search analyzer AnalysisMode mode = AnalysisMode.INDEX_TIME; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index f92867d1ce461..1c5ae3baca827 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -44,9 +44,11 @@ import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; import org.apache.lucene.tests.analysis.Token; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalyzerScope; @@ -249,6 +251,64 @@ public void testDefaults() throws IOException { assertEquals(DocValuesType.NONE, fieldType.docValuesType()); } + public void testStoreParameterDefaults() throws IOException { + var timeSeriesIndexMode = randomBoolean(); + var isStored = randomBoolean(); + var hasKeywordFieldForSyntheticSource = randomBoolean(); + + var indexSettingsBuilder = getIndexSettingsBuilder(); + if (timeSeriesIndexMode) { + indexSettingsBuilder.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dimension") + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), "2000-01-08T23:40:53.384Z") + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), "2106-01-08T23:40:53.384Z"); + } + var indexSettings = indexSettingsBuilder.build(); + + var mapping = mapping(b -> { + b.startObject("field"); + b.field("type", "text"); + if (isStored) { + b.field("store", isStored); + } + if (hasKeywordFieldForSyntheticSource) { + b.startObject("fields"); + b.startObject("keyword"); + b.field("type", "keyword"); + b.endObject(); + b.endObject(); + } + b.endObject(); + + if (timeSeriesIndexMode) { + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("dimension"); + b.field("type", "keyword"); + b.field("time_series_dimension", "true"); + b.endObject(); + } + }); + DocumentMapper mapper = createMapperService(getVersion(), indexSettings, () -> true, mapping).documentMapper(); + + var source = source(TimeSeriesRoutingHashFieldMapper.DUMMY_ENCODED_VALUE, b -> { + b.field("field", "1234"); + if (timeSeriesIndexMode) { + b.field("@timestamp", randomMillisUpToYear9999()); + b.field("dimension", "dimension1"); + } + }, null); + ParsedDocument doc = mapper.parse(source); + List 
fields = doc.rootDoc().getFields("field"); + IndexableFieldType fieldType = fields.get(0).fieldType(); + if (isStored || (timeSeriesIndexMode && hasKeywordFieldForSyntheticSource == false)) { + assertTrue(fieldType.stored()); + } else { + assertFalse(fieldType.stored()); + } + } + public void testBWCSerialization() throws IOException { MapperService mapperService = createMapperService(fieldMapping(b -> { b.field("type", "text"); @@ -1138,7 +1198,8 @@ public SyntheticSourceExample example(int maxValues) { delegate.expectedForSyntheticSource(), delegate.expectedForBlockLoader(), b -> { - b.field("type", "text").field("store", true); + b.field("type", "text"); + b.field("store", true); if (indexText == false) { b.field("index", false); } @@ -1196,6 +1257,17 @@ public List invalidExample() throws IOException { b.endObject(); } b.endObject(); + }), + new SyntheticSourceInvalidExample(err, b -> { + b.field("type", "text"); + b.startObject("fields"); + { + b.startObject("kwd"); + b.field("type", "keyword"); + b.field("doc_values", "false"); + b.endObject(); + } + b.endObject(); }) ); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index cade1e66c7fc7..0216bad7cf7a3 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesRoutingHashFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; @@ -44,11 +45,11 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Predicate; import static org.elasticsearch.test.LambdaMatchers.falseWith; import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -246,24 +247,24 @@ public void testGetFieldFilter() { List mapperPlugins = List.of(new MapperPlugin() { }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("hidden_index") ? field -> false : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("hidden_index") ? HIDDEN_INDEX : FieldPredicate.ACCEPT_ALL; } }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> field -> field.equals("hidden_field") == false; + public Function getFieldFilter() { + return index -> HIDDEN_FIELD; } }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("filtered") ? field -> field.equals("visible") : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("filtered") ? 
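The branch at the end of testStoreParameterDefaults above is the contract in miniature. Distilled as a hypothetical predicate (names are mine, the logic mirrors the assertion exactly):

    // A text field defaults to stored when the mapping says so explicitly, or when
    // the index is in time-series mode and no keyword sub-field exists from which
    // synthetic source could recover the value.
    static boolean expectStored(boolean explicitlyStored, boolean timeSeriesMode, boolean hasSyntheticSourceKeyword) {
        return explicitlyStored || (timeSeriesMode && hasSyntheticSourceKeyword == false);
    }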
ONLY_VISIBLE : FieldPredicate.ACCEPT_ALL; } }); IndicesModule indicesModule = new IndicesModule(mapperPlugins); MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - Function> fieldFilter = mapperRegistry.getFieldFilter(); + Function fieldFilter = mapperRegistry.getFieldFilter(); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); assertThat(fieldFilter.apply("hidden_index"), falseWith(randomAlphaOfLengthBetween(3, 5))); @@ -276,6 +277,10 @@ public Function> getFieldFilter() { assertThat(fieldFilter.apply("hidden_index"), falseWith("visible")); assertThat(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)), trueWith("visible")); assertThat(fieldFilter.apply("hidden_index"), falseWith("hidden_field")); + + assertThat(fieldFilter.apply("filtered").modifyHash("hash"), equalTo("only-visible:hide-field:hash")); + assertThat(fieldFilter.apply(randomAlphaOfLengthBetween(3, 5)).modifyHash("hash"), equalTo("hide-field:hash")); + assertThat(fieldFilter.apply("hidden_index").modifyHash("hash"), equalTo("hide-field:hidden:hash")); } public void testDefaultFieldFilterIsNoOp() { @@ -286,7 +291,7 @@ public void testDefaultFieldFilterIsNoOp() { }); } IndicesModule indicesModule = new IndicesModule(mapperPlugins); - Function> fieldFilter = indicesModule.getMapperRegistry().getFieldFilter(); + Function fieldFilter = indicesModule.getMapperRegistry().getFieldFilter(); assertSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); } @@ -294,21 +299,72 @@ public void testNoOpFieldPredicate() { List mapperPlugins = Arrays.asList(new MapperPlugin() { }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("hidden_index") ? field -> false : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("hidden_index") ? HIDDEN_INDEX : FieldPredicate.ACCEPT_ALL; } }, new MapperPlugin() { @Override - public Function> getFieldFilter() { - return index -> index.equals("filtered") ? field -> field.equals("visible") : MapperPlugin.NOOP_FIELD_PREDICATE; + public Function getFieldFilter() { + return index -> index.equals("filtered") ? 
ONLY_VISIBLE : FieldPredicate.ACCEPT_ALL; } }); IndicesModule indicesModule = new IndicesModule(mapperPlugins); MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - Function> fieldFilter = mapperRegistry.getFieldFilter(); - assertSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply(randomAlphaOfLengthBetween(3, 7))); - assertNotSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("hidden_index")); - assertNotSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("filtered")); + Function fieldFilter = mapperRegistry.getFieldFilter(); + assertSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply(randomAlphaOfLengthBetween(3, 7))); + assertNotSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("hidden_index")); + assertNotSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("filtered")); } + + private static final FieldPredicate HIDDEN_INDEX = new FieldPredicate() { + @Override + public boolean test(String field) { + return false; + } + + @Override + public String modifyHash(String hash) { + return "hidden:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; + + private static final FieldPredicate HIDDEN_FIELD = new FieldPredicate() { + @Override + public boolean test(String field) { + return false == field.equals("hidden_field"); + } + + @Override + public String modifyHash(String hash) { + return "hide-field:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; + + private static final FieldPredicate ONLY_VISIBLE = new FieldPredicate() { + @Override + public boolean test(String field) { + return field.equals("visible"); + } + + @Override + public String modifyHash(String hash) { + return "only-visible:" + hash; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index c521ab5e047aa..4e4f5c9c0ddfa 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -317,7 +317,11 @@ public void testBuildSearchContextHighlight() throws IOException { ) { @Override public MappedFieldType getFieldType(String name) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + createDefaultIndexAnalyzers(), + idxSettings.getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } }; diff --git a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java index c89edb29b5058..4855a043c565a 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResultTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.profile; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; import 
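The modifyHash assertions in testGetFieldFilter above (for example "only-visible:hide-field:hash") show predicates contributed by several plugins being chained, with each layer wrapping the cache key of the one beneath it. A sketch of what such a combinator could look like; the And record here is illustrative, only the three interface methods come from this diff:

    // Visibility tests AND together; hash hooks nest, outer applied last; memory
    // accounting sums. And(ONLY_VISIBLE, HIDDEN_FIELD).modifyHash("hash") yields
    // "only-visible:hide-field:hash", matching the expectation above.
    record And(FieldPredicate first, FieldPredicate second) implements FieldPredicate {
        @Override
        public boolean test(String field) {
            return first.test(field) && second.test(field);
        }

        @Override
        public String modifyHash(String hash) {
            return first.modifyHash(second.modifyHash(hash));
        }

        @Override
        public long ramBytesUsed() {
            return first.ramBytesUsed() + second.ramBytesUsed();
        }
    }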
org.elasticsearch.search.profile.query.QueryProfileShardResultTests; @@ -48,7 +49,7 @@ protected Reader instanceReader() { @Override protected SearchProfileDfsPhaseResult doParseInstance(XContentParser parser) throws IOException { - return SearchProfileDfsPhaseResult.fromXContent(parser); + return SearchResponseUtils.parseProfileDfsPhaseResult(parser); } public void testCombineQueryProfileShardResults() { diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java index f28425172ead5..56520c0c6d033 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfileShardResultTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.profile.query; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.search.profile.ProfileResultTests; import org.elasticsearch.test.AbstractXContentSerializingTestCase; @@ -51,7 +52,7 @@ protected QueryProfileShardResult mutateInstance(QueryProfileShardResult instanc @Override protected QueryProfileShardResult doParseInstance(XContentParser parser) throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - QueryProfileShardResult result = QueryProfileShardResult.fromXContent(parser); + QueryProfileShardResult result = SearchResponseUtils.parseQueryProfileShardResult(parser); ensureExpectedToken(null, parser.nextToken(), parser); return result; } diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java index 0ade522ae1ffa..7113117a4d7fa 100644 --- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java @@ -160,7 +160,11 @@ public void testBuildRescoreSearchContext() throws ElasticsearchParseException, ) { @Override public MappedFieldType getFieldType(String name) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + createDefaultIndexAnalyzers(), + idxSettings.getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } }; @@ -222,7 +226,11 @@ public void testRewritingKeepsSettings() throws IOException { ) { @Override public MappedFieldType getFieldType(String name) { - TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); + TextFieldMapper.Builder builder = new TextFieldMapper.Builder( + name, + createDefaultIndexAnalyzers(), + idxSettings.getMode().isSyntheticSourceEnabled() + ); return builder.build(MapperBuilderContext.root(false, false)).fieldType(); } }; diff --git a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java index 86486cac893cf..3ded47b6d2671 100644 --- a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldFuzzyQueryTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.runtime; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.elasticsearch.script.Script; @@ -68,18 +69,19 @@ protected StringScriptFieldFuzzyQuery mutate(StringScriptFieldFuzzyQuery orig) { @Override public void testMatches() { StringScriptFieldFuzzyQuery query = StringScriptFieldFuzzyQuery.build(randomScript(), leafFactory, "test", "foo", 1, 0, false); - assertTrue(query.matches(List.of("foo"))); - assertTrue(query.matches(List.of("foa"))); - assertTrue(query.matches(List.of("foo", "bar"))); - assertFalse(query.matches(List.of("bar"))); + BytesRefBuilder scratch = new BytesRefBuilder(); + assertTrue(query.matches(List.of("foo"), scratch)); + assertTrue(query.matches(List.of("foa"), scratch)); + assertTrue(query.matches(List.of("foo", "bar"), scratch)); + assertFalse(query.matches(List.of("bar"), scratch)); query = StringScriptFieldFuzzyQuery.build(randomScript(), leafFactory, "test", "foo", 0, 0, false); - assertTrue(query.matches(List.of("foo"))); - assertFalse(query.matches(List.of("foa"))); + assertTrue(query.matches(List.of("foo"), scratch)); + assertFalse(query.matches(List.of("foa"), scratch)); query = StringScriptFieldFuzzyQuery.build(randomScript(), leafFactory, "test", "foo", 2, 0, false); - assertTrue(query.matches(List.of("foo"))); - assertTrue(query.matches(List.of("foa"))); - assertTrue(query.matches(List.of("faa"))); - assertFalse(query.matches(List.of("faaa"))); + assertTrue(query.matches(List.of("foo"), scratch)); + assertTrue(query.matches(List.of("foa"), scratch)); + assertTrue(query.matches(List.of("faa"), scratch)); + assertFalse(query.matches(List.of("faaa"), scratch)); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java index 50c6786de1282..46f841c344e5f 100644 --- a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.runtime; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; @@ -84,13 +85,14 @@ public void testMatches() { 0, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT ); - assertTrue(query.matches(List.of("astuffb"))); - assertFalse(query.matches(List.of("astuffB"))); - assertFalse(query.matches(List.of("fffff"))); - assertFalse(query.matches(List.of("ab"))); - assertFalse(query.matches(List.of("aasdf"))); - assertFalse(query.matches(List.of("dsfb"))); - assertTrue(query.matches(List.of("astuffb", "fffff"))); + BytesRefBuilder scratch = new BytesRefBuilder(); + assertTrue(query.matches(List.of("astuffb"), scratch)); + assertFalse(query.matches(List.of("astuffB"), scratch)); + assertFalse(query.matches(List.of("fffff"), scratch)); + assertFalse(query.matches(List.of("ab"), scratch)); + assertFalse(query.matches(List.of("aasdf"), scratch)); + assertFalse(query.matches(List.of("dsfb"), scratch)); + assertTrue(query.matches(List.of("astuffb", "fffff"), scratch)); StringScriptFieldRegexpQuery 
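The scratch parameter threaded through matches in these runtime-query tests exists so one UTF-8 conversion buffer is reused across candidate values instead of allocating per value. A self-contained sketch of the pattern (a standalone method, not the production signature):

    import org.apache.lucene.util.BytesRefBuilder;
    import org.apache.lucene.util.automaton.ByteRunAutomaton;

    import java.util.List;

    final class AutomatonMatchSketch {
        // True if any value matches; each String is encoded as UTF-8 into the
        // caller-provided, reusable scratch buffer before running the automaton.
        static boolean anyMatch(ByteRunAutomaton automaton, List<String> values, BytesRefBuilder scratch) {
            for (String value : values) {
                scratch.clear();
                scratch.copyChars(value);
                if (automaton.run(scratch.bytes(), 0, scratch.length())) {
                    return true;
                }
            }
            return false;
        }
    }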
ciQuery = new StringScriptFieldRegexpQuery( randomScript(), @@ -101,9 +103,8 @@ public void testMatches() { RegExp.ASCII_CASE_INSENSITIVE, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT ); - assertTrue(ciQuery.matches(List.of("astuffB"))); - assertTrue(ciQuery.matches(List.of("Astuffb", "fffff"))); - + assertTrue(ciQuery.matches(List.of("astuffB"), scratch)); + assertTrue(ciQuery.matches(List.of("Astuffb", "fffff"), scratch)); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java index 37e24553f9fce..f6cd59f4254ad 100644 --- a/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldWildcardQueryTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.runtime; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.elasticsearch.script.Script; @@ -52,18 +53,19 @@ protected StringScriptFieldWildcardQuery mutate(StringScriptFieldWildcardQuery o @Override public void testMatches() { StringScriptFieldWildcardQuery query = new StringScriptFieldWildcardQuery(randomScript(), leafFactory, "test", "a*b", false); - assertTrue(query.matches(List.of("astuffb"))); - assertFalse(query.matches(List.of("Astuffb"))); - assertFalse(query.matches(List.of("fffff"))); - assertFalse(query.matches(List.of("a"))); - assertFalse(query.matches(List.of("b"))); - assertFalse(query.matches(List.of("aasdf"))); - assertFalse(query.matches(List.of("dsfb"))); - assertTrue(query.matches(List.of("astuffb", "fffff"))); + BytesRefBuilder scratch = new BytesRefBuilder(); + assertTrue(query.matches(List.of("astuffb"), scratch)); + assertFalse(query.matches(List.of("Astuffb"), scratch)); + assertFalse(query.matches(List.of("fffff"), scratch)); + assertFalse(query.matches(List.of("a"), scratch)); + assertFalse(query.matches(List.of("b"), scratch)); + assertFalse(query.matches(List.of("aasdf"), scratch)); + assertFalse(query.matches(List.of("dsfb"), scratch)); + assertTrue(query.matches(List.of("astuffb", "fffff"), scratch)); StringScriptFieldWildcardQuery ciQuery = new StringScriptFieldWildcardQuery(randomScript(), leafFactory, "test", "a*b", true); - assertTrue(ciQuery.matches(List.of("Astuffb"))); - assertTrue(ciQuery.matches(List.of("astuffB", "fffff"))); + assertTrue(ciQuery.matches(List.of("Astuffb"), scratch)); + assertTrue(ciQuery.matches(List.of("astuffB", "fffff"), scratch)); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java index ea97bafc5e4c8..675b5959f35a3 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldTypeTestCase.java @@ -13,6 +13,7 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -64,6 +65,18 @@ public abstract class AbstractScriptFieldTypeTestCase extends MapperServiceTestC 
protected abstract String typeName(); + /** + * Add the provided document to the provided writer, and randomly flush. + * This is useful for situations where there are not enough documents indexed to trigger random flush and commit performed + * by {@link RandomIndexWriter}. Flushing is important to obtain multiple slices and inter-segment concurrency. + */ + protected static void addDocument(RandomIndexWriter iw, Iterable<? extends IndexableField> indexableFields) throws IOException { + iw.addDocument(indexableFields); + if (randomBoolean()) { + iw.flush(); + } + } + public final void testMinimalSerializesToItself() throws IOException { XContentBuilder orig = JsonXContent.contentBuilder().startObject(); createMapperService(runtimeFieldMapping(this::minimalMapping)).documentMapper().mapping().toXContent(orig, ToXContent.EMPTY_PARAMS); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java index 16cb0b4656fcf..61fc190e4952d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockFieldFilterPlugin.java @@ -8,18 +8,32 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.core.Predicates; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; import java.util.function.Function; -import java.util.function.Predicate; public class MockFieldFilterPlugin extends Plugin implements MapperPlugin { @Override - public Function<String, Predicate<String>> getFieldFilter() { + public Function<String, FieldPredicate> getFieldFilter() { // this filter doesn't filter any field out, but it's used to exercise the code path executed when the filter is not no-op - return index -> Predicates.always(); + return index -> new FieldPredicate() { + @Override + public boolean test(String field) { + return true; + } + + @Override + public String modifyHash(String hash) { + return hash + ":includeall"; + } + + @Override + public long ramBytesUsed() { + return 0; + } + }; } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index 71837ccf14387..8831149fec905 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -24,10 +24,12 @@ import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; +import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -40,6 +42,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public enum SearchResponseUtils { ; @@ -439,7 +442,7 @@
private static void parseProfileResultsEntry(XContentParser parser, Map<String, SearchProfileShardResult> searchProfileResults) + private static final InstantiatingObjectParser<SearchProfileDfsPhaseResult, Void> PROFILE_DFS_PHASE_RESULT_PARSER; + + static { + InstantiatingObjectParser.Builder<SearchProfileDfsPhaseResult, Void> parser = InstantiatingObjectParser.builder( + "search_profile_dfs_phase_result", + true, + SearchProfileDfsPhaseResult.class + ); + parser.declareObject(optionalConstructorArg(), (p, c) -> ProfileResult.fromXContent(p), SearchProfileDfsPhaseResult.STATISTICS); + parser.declareObjectArray(optionalConstructorArg(), (p, c) -> parseQueryProfileShardResult(p), SearchProfileDfsPhaseResult.KNN); + PROFILE_DFS_PHASE_RESULT_PARSER = parser.build(); + } + + public static SearchProfileDfsPhaseResult parseProfileDfsPhaseResult(XContentParser parser) throws IOException { + return PROFILE_DFS_PHASE_RESULT_PARSER.parse(parser, null); + } + + public static QueryProfileShardResult parseQueryProfileShardResult(XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + String currentFieldName = null; + List<ProfileResult> queryProfileResults = new ArrayList<>(); + long rewriteTime = 0; + Long vectorOperationsCount = null; + CollectorResult collector = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (QueryProfileShardResult.REWRITE_TIME.equals(currentFieldName)) { + rewriteTime = parser.longValue(); + } else if (QueryProfileShardResult.VECTOR_OPERATIONS_COUNT.equals(currentFieldName)) { + vectorOperationsCount = parser.longValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (QueryProfileShardResult.QUERY_ARRAY.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + queryProfileResults.add(ProfileResult.fromXContent(parser)); + } + } else if (QueryProfileShardResult.COLLECTOR.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + collector = CollectorResult.fromXContent(parser); + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + return new QueryProfileShardResult(queryProfileResults, rewriteTime, collector, vectorOperationsCount); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java index 5f720eededf02..751eed222ee7a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBroadcastResponseTestCase.java @@ -10,12 +10,15 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import
org.elasticsearch.xcontent.XContentType; @@ -24,12 +27,38 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public abstract class AbstractBroadcastResponseTestCase<T extends BaseBroadcastResponse> extends AbstractXContentTestCase<T> { + private static final ParseField _SHARDS_FIELD = new ParseField("_shards"); + private static final ParseField TOTAL_FIELD = new ParseField("total"); + private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful"); + private static final ParseField FAILED_FIELD = new ParseField("failed"); + private static final ParseField FAILURES_FIELD = new ParseField("failures"); + + @SuppressWarnings("unchecked") + public static <T extends BaseBroadcastResponse> void declareBroadcastFields(ConstructingObjectParser<T, Void> PARSER) { + ConstructingObjectParser<BaseBroadcastResponse, Void> shardsParser = new ConstructingObjectParser<>( + "_shards", + true, + arg -> new BaseBroadcastResponse((int) arg[0], (int) arg[1], (int) arg[2], (List<DefaultShardOperationFailedException>) arg[3]) + ); + shardsParser.declareInt(constructorArg(), TOTAL_FIELD); + shardsParser.declareInt(constructorArg(), SUCCESSFUL_FIELD); + shardsParser.declareInt(constructorArg(), FAILED_FIELD); + shardsParser.declareObjectArray( + optionalConstructorArg(), + (p, c) -> DefaultShardOperationFailedException.fromXContent(p), + FAILURES_FIELD + ); + PARSER.declareObject(constructorArg(), shardsParser, _SHARDS_FIELD); + } + @Override protected T createTestInstance() { int totalShards = randomIntBetween(1, 10); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 307daddd17c37..6905ee391a6eb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -72,6 +72,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.AbstractBroadcastResponseTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.DeprecationHandler; @@ -1312,7 +1313,7 @@ protected static BroadcastResponse refresh(String index) throws IOException { ); static { - BaseBroadcastResponse.declareBroadcastFields(BROADCAST_RESPONSE_PARSER); + AbstractBroadcastResponseTestCase.declareBroadcastFields(BROADCAST_RESPONSE_PARSER); } protected static BroadcastResponse refresh(RestClient client, String index) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicate.java new file mode 100644 index 0000000000000..90ee353b46eaa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicate.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.authz.permission; + +import org.apache.lucene.util.RamUsageEstimator; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.apache.lucene.util.automaton.Transition; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.plugins.FieldPredicate; + +import java.io.IOException; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.util.Base64; + +/** + * An implementation of {@link FieldPredicate} which matches fields + * against an {@link Automaton}. + */ +class AutomatonFieldPredicate implements FieldPredicate { + private final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(AutomatonFieldPredicate.class); + + private final String automatonHash; + private final CharacterRunAutomaton automaton; + + AutomatonFieldPredicate(Automaton originalAutomaton, CharacterRunAutomaton automaton) { + this.automatonHash = sha256(originalAutomaton); + this.automaton = automaton; + } + + @Override + public boolean test(String field) { + return automaton.run(field); + } + + @Override + public String modifyHash(String hash) { + return hash + ":" + automatonHash; + } + + @Override + public long ramBytesUsed() { + return SHALLOW_SIZE + RamUsageEstimator.sizeOf(automatonHash); // automaton itself is a shallow copy so not counted here + } + + private static String sha256(Automaton automaton) { + MessageDigest messageDigest = MessageDigests.sha256(); + try { + StreamOutput out = new OutputStreamStreamOutput(new DigestOutputStream(Streams.NULL_OUTPUT_STREAM, messageDigest)); + Transition t = new Transition(); + for (int state = 0; state < automaton.getNumStates(); state++) { + out.writeInt(state); + out.writeBoolean(automaton.isAccept(state)); + + int numTransitions = automaton.initTransition(state, t); + for (int i = 0; i < numTransitions; ++i) { + automaton.getNextTransition(t); + out.writeInt(t.dest); + out.writeInt(t.min); + out.writeInt(t.max); + } + } + } catch (IOException bogus) { + // cannot happen + throw new Error(bogus); + } + return Base64.getEncoder().encodeToString(messageDigest.digest()); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index 8f2088f55ade6..f3c2d9f62e40f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReader; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup; import 
org.elasticsearch.xpack.core.security.authz.support.SecurityQueryTemplateEvaluator.DlsQueryEvaluationContext; @@ -67,6 +68,7 @@ public final class FieldPermissions implements Accountable, CacheKey { private final CharacterRunAutomaton permittedFieldsAutomaton; private final boolean permittedFieldsAutomatonIsTotal; private final Automaton originalAutomaton; + private final FieldPredicate fieldPredicate; private final long ramBytesUsed; @@ -106,6 +108,9 @@ private FieldPermissions(List<FieldPermissionsDefinition> fieldPermissionsDefinitions this.permittedFieldsAutomaton = new CharacterRunAutomaton(permittedFieldsAutomaton); // we cache the result of isTotal since this might be a costly operation this.permittedFieldsAutomatonIsTotal = Operations.isTotal(permittedFieldsAutomaton); + this.fieldPredicate = permittedFieldsAutomatonIsTotal + ? FieldPredicate.ACCEPT_ALL + : new AutomatonFieldPredicate(originalAutomaton, this.permittedFieldsAutomaton); long ramBytesUsed = BASE_FIELD_PERM_DEF_BYTES; ramBytesUsed += this.fieldPermissionsDefinitions.stream() @@ -113,6 +118,7 @@ private FieldPermissions(List<FieldPermissionsDefinition> fieldPermissionsDefinitions .sum(); ramBytesUsed += permittedFieldsAutomaton.ramBytesUsed(); ramBytesUsed += runAutomatonRamBytesUsed(permittedFieldsAutomaton); + ramBytesUsed += fieldPredicate.ramBytesUsed(); this.ramBytesUsed = ramBytesUsed; } @@ -220,6 +226,10 @@ public boolean grantsAccessTo(String fieldName) { return permittedFieldsAutomatonIsTotal || permittedFieldsAutomaton.run(fieldName); } + public FieldPredicate fieldPredicate() { + return fieldPredicate; + } + public List<FieldPermissionsDefinition> getFieldPermissionsDefinitions() { return fieldPermissionsDefinitions; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java index 43dc92857551a..d89732cb3b177 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java @@ -7,22 +7,15 @@ package org.elasticsearch.xpack.core.termsenum.action; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.Collections; import java.util.List; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - /** * The response of the _terms_enum action.
*/ @@ -31,28 +24,6 @@ public class TermsEnumResponse extends BroadcastResponse { public static final String TERMS_FIELD = "terms"; public static final String COMPLETE_FIELD = "complete"; - @SuppressWarnings("unchecked") - static final ConstructingObjectParser<TermsEnumResponse, Void> PARSER = new ConstructingObjectParser<>( - "term_enum_results", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new TermsEnumResponse( - (List<String>) arg[1], - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()), - (Boolean) arg[2] - ); - } - ); - static { - declareBroadcastFields(PARSER); - PARSER.declareStringArray(optionalConstructorArg(), new ParseField(TERMS_FIELD)); - PARSER.declareBoolean(optionalConstructorArg(), new ParseField(COMPLETE_FIELD)); - } - private final List<String> terms; private boolean complete; @@ -106,7 +77,4 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) throws builder.field(COMPLETE_FIELD, complete); } - public static TermsEnumResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index bd267d19398b0..918976c0d3db8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -67,6 +67,7 @@ import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; @@ -454,8 +455,8 @@ public void onIndexModule(IndexModule indexModule) { } @Override - public Function<String, Predicate<String>> getFieldFilter() { - List<Function<String, Predicate<String>>> items = filterPlugins(MapperPlugin.class).stream() + public Function<String, FieldPredicate> getFieldFilter() { + List<Function<String, FieldPredicate>> items = filterPlugins(MapperPlugin.class).stream() .map(p -> p.getFieldFilter()) .filter(p -> p.equals(NOOP_FIELD_FILTER) == false) .toList(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicateTests.java new file mode 100644 index 0000000000000..d62cbb7dbab6b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/AutomatonFieldPredicateTests.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + + package org.elasticsearch.xpack.core.security.authz.permission; + + import org.apache.lucene.util.automaton.Automata; + import org.apache.lucene.util.automaton.Automaton; + import org.apache.lucene.util.automaton.CharacterRunAutomaton; + import org.elasticsearch.test.ESTestCase; + + import static org.hamcrest.Matchers.equalTo; + import static org.hamcrest.Matchers.not; + + public class AutomatonFieldPredicateTests extends ESTestCase { + public void testMatching() { + String str = randomAlphaOfLength(10); + Automaton a = Automata.makeString(str); + AutomatonFieldPredicate pred = new AutomatonFieldPredicate(a, new CharacterRunAutomaton(a)); + assertTrue(pred.test(str)); + assertFalse(pred.test(str + randomAlphaOfLength(1))); + } + + public void testHash() { + Automaton a = Automata.makeString("a"); + AutomatonFieldPredicate predA = new AutomatonFieldPredicate(a, new CharacterRunAutomaton(a)); + + Automaton b = Automata.makeString("b"); + AutomatonFieldPredicate predB = new AutomatonFieldPredicate(b, new CharacterRunAutomaton(b)); + + assertThat(predA.modifyHash("a"), not(equalTo(predB.modifyHash("a")))); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java index a31c44a165cdf..1804de134c8fb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TermsEnumResponseTests.java @@ -8,19 +8,48 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.AbstractBroadcastResponseTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.termsenum.action.TermsEnumResponse; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Set; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class TermsEnumResponseTests extends AbstractBroadcastResponseTestCase<TermsEnumResponse> { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser<TermsEnumResponse, Void> PARSER = new ConstructingObjectParser<>( + "term_enum_results", + true, + arg -> { + BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; + return new TermsEnumResponse( + (List<String>) arg[1], + response.getTotalShards(), + response.getSuccessfulShards(), + response.getFailedShards(), + Arrays.asList(response.getShardFailures()), + (Boolean) arg[2] + ); + } + ); + + static { + AbstractBroadcastResponseTestCase.declareBroadcastFields(PARSER); + PARSER.declareStringArray(optionalConstructorArg(), new ParseField(TermsEnumResponse.TERMS_FIELD)); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField(TermsEnumResponse.COMPLETE_FIELD)); + } + protected static List<String> getRandomTerms() { int termCount = randomIntBetween(0, 100); Set<String> uniqueTerms = Sets.newHashSetWithExpectedSize(termCount); @@ -48,7 +77,7 @@ private static TermsEnumResponse createRandomTermEnumResponse() { @Override protected TermsEnumResponse doParseInstance(XContentParser parser) throws
IOException { - return TermsEnumResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 0570d93441be1..58401451fa86b 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -805,7 +805,17 @@ private void createDownsampleIndex( .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, String.valueOf(numberOfReplicas)) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey(), DownsampleTaskStatus.STARTED) - .put(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey(), downsampleInterval); + .put(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey(), downsampleInterval) + .put(IndexSettings.MODE.getKey(), sourceIndexMetadata.getIndexMode()) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), sourceIndexMetadata.getRoutingPaths()) + .put( + IndexSettings.TIME_SERIES_START_TIME.getKey(), + sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_START_TIME.getKey()) + ) + .put( + IndexSettings.TIME_SERIES_END_TIME.getKey(), + sourceIndexMetadata.getSettings().get(IndexSettings.TIME_SERIES_END_TIME.getKey()) + ); if (sourceIndexMetadata.getSettings().hasValue(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey())) { builder.put( MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java index 1be01f445691d..cb65d2337d588 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/GroupingAggregatorImplementer.java @@ -100,7 +100,7 @@ public GroupingAggregatorImplementer( this.createParameters = init.getParameters() .stream() .map(Parameter::from) - .filter(f -> false == f.type().equals(BIG_ARRAYS)) + .filter(f -> false == f.type().equals(BIG_ARRAYS) && false == f.type().equals(DRIVER_CONTEXT)) .collect(Collectors.toList()); this.implementation = ClassName.get( diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java index 016bf9387ca4b..2dc5b441ca00d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateDoubleAggregator.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -23,6 +24,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping 
aggregation definition for double. * This class is generated. Edit `X-RateAggregator.java.st` instead. @@ -35,9 +38,9 @@ @IntermediateState(name = "resets", type = "DOUBLE") } ) public class RateDoubleAggregator { - public static DoubleRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new DoubleRateGroupingState(bigArrays, unitInMillis); + + public static DoubleRateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new DoubleRateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public static void combine(DoubleRateGroupingState current, int groupId, long timestamp, double value) { @@ -58,17 +61,17 @@ public static void combineIntermediate( public static void combineStates( DoubleRateGroupingState current, int currentGroupId, // make the stylecheck happy - DoubleRateGroupingState state, - int statePosition + DoubleRateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal(DoubleRateGroupingState state, IntVector selected, DriverContext driverContext) { return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class DoubleRateState implements Accountable { + private static class DoubleRateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(DoubleRateState.class); final long[] timestamps; // descending order final double[] values; @@ -101,9 +104,10 @@ int entries() { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Double.BYTES * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -111,9 +115,12 @@ public static final class DoubleRateGroupingState implements Releasable, Accountable private ObjectArray<DoubleRateState> states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - DoubleRateGroupingState(BigArrays bigArrays, long unitInMillis) { + DoubleRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -122,16 +129,25 @@ void ensureCapacity(int groupId) { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<<rate aggregation>>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, double value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(DoubleRateState.bytesUsed(1)); state = new DoubleRateState(new long[] { timestamp }, new double[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker(DoubleRateState.bytesUsed(2)); state = new DoubleRateState(new long[] { state.timestamps[0], timestamp }, new double[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-DoubleRateState.bytesUsed(1)); // old state } else {
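// (reviewer note) No adjustBreaker() call is needed on the in-place path below: the
// existing fixed-size arrays are updated and nothing is reallocated, so bytesUsed(),
// which depends only on array lengths, is unchanged.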
state.append(timestamp, value); } @@ -147,7 +163,9 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, double reset ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(DoubleRateState.bytesUsed(valueCount)); state = new DoubleRateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -155,11 +173,13 @@ void combine(int groupId, LongBlock timestamps, DoubleBlock values, double reset state.values[i] = values.getDouble(firstIndex + i); } } else { + adjustBreaker(DoubleRateState.bytesUsed(state.entries() + valueCount)); var newState = new DoubleRateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-DoubleRateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge(DoubleRateState curr, DoubleRateState dst, int firstIndex, int rightCount, LongBlock timestamps, DoubleBlock values) { @@ -191,14 +211,57 @@ void merge(DoubleRateState curr, DoubleRateState dst, int firstIndex, int rightC } } + void combineState(int groupId, DoubleRateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker(DoubleRateState.bytesUsed(len)); + curr = new DoubleRateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + DoubleRateState mergeState(DoubleRateState s1, DoubleRateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker(DoubleRateState.bytesUsed(newLen)); + var dst = new DoubleRateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java index fbf43f7d72c46..1ba8b9264c24a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateIntAggregator.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.Accountable; import 
org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -24,6 +25,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping aggregation definition for int. * This class is generated. Edit `X-RateAggregator.java.st` instead. @@ -36,9 +39,9 @@ @IntermediateState(name = "resets", type = "DOUBLE") } ) public class RateIntAggregator { - public static IntRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new IntRateGroupingState(bigArrays, unitInMillis); + + public static IntRateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new IntRateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public static void combine(IntRateGroupingState current, int groupId, long timestamp, int value) { @@ -59,17 +62,17 @@ public static void combineIntermediate( public static void combineStates( IntRateGroupingState current, int currentGroupId, // make the stylecheck happy - IntRateGroupingState state, - int statePosition + IntRateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal(IntRateGroupingState state, IntVector selected, DriverContext driverContext) { return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class IntRateState implements Accountable { + private static class IntRateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(IntRateState.class); final long[] timestamps; // descending order final int[] values; @@ -102,9 +105,10 @@ int entries() { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Integer.BYTES * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -112,9 +116,12 @@ public static final class IntRateGroupingState implements Releasable, Accountable private ObjectArray<IntRateState> states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - IntRateGroupingState(BigArrays bigArrays, long unitInMillis) { + IntRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -123,16 +130,25 @@ void ensureCapacity(int groupId) { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<<rate aggregation>>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, int value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(IntRateState.bytesUsed(1)); state = new IntRateState(new long[] { timestamp }, new int[] { value });
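// (reviewer note) Same discipline as the double variant above: reserve the new state's
// bytes on the breaker before allocating, and release the old state's bytes only after
// the replacement is published to `states`, so the estimate never undercounts live memory.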
states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker(IntRateState.bytesUsed(2)); state = new IntRateState(new long[] { state.timestamps[0], timestamp }, new int[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-IntRateState.bytesUsed(1)); // old state } else { state.append(timestamp, value); } @@ -148,7 +164,9 @@ void combine(int groupId, LongBlock timestamps, IntBlock values, double reset, i ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(IntRateState.bytesUsed(valueCount)); state = new IntRateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -156,11 +174,13 @@ void combine(int groupId, LongBlock timestamps, IntBlock values, double reset, i state.values[i] = values.getInt(firstIndex + i); } } else { + adjustBreaker(IntRateState.bytesUsed(state.entries() + valueCount)); var newState = new IntRateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-IntRateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge(IntRateState curr, IntRateState dst, int firstIndex, int rightCount, LongBlock timestamps, IntBlock values) { @@ -192,14 +212,57 @@ void merge(IntRateState curr, IntRateState dst, int firstIndex, int rightCount, } } + void combineState(int groupId, IntRateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker(IntRateState.bytesUsed(len)); + curr = new IntRateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + IntRateState mergeState(IntRateState s1, IntRateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker(IntRateState.bytesUsed(newLen)); + var dst = new IntRateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java index b5d0dfc8aabdb..846c6f0cc2730 100644 --- 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/RateLongAggregator.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -23,6 +24,8 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping aggregation definition for long. * This class is generated. Edit `X-RateAggregator.java.st` instead. @@ -35,9 +38,9 @@ @IntermediateState(name = "resets", type = "DOUBLE") } ) public class RateLongAggregator { - public static LongRateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new LongRateGroupingState(bigArrays, unitInMillis); + + public static LongRateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new LongRateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public static void combine(LongRateGroupingState current, int groupId, long timestamp, long value) { @@ -58,17 +61,17 @@ public static void combineIntermediate( public static void combineStates( LongRateGroupingState current, int currentGroupId, // make the stylecheck happy - LongRateGroupingState state, - int statePosition + LongRateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal(LongRateGroupingState state, IntVector selected, DriverContext driverContext) { return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class LongRateState implements Accountable { + private static class LongRateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject(LongRateState.class); final long[] timestamps; // descending order final long[] values; @@ -101,9 +104,10 @@ int entries() { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -111,9 +115,12 @@ public static final class LongRateGroupingState implements Releasable, Accountable private ObjectArray<LongRateState> states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - LongRateGroupingState(BigArrays bigArrays, long unitInMillis) { + LongRateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -122,16 +129,25 @@ void ensureCapacity(int groupId) { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<<rate aggregation>>"); +
stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, long value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(LongRateState.bytesUsed(1)); state = new LongRateState(new long[] { timestamp }, new long[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker(LongRateState.bytesUsed(2)); state = new LongRateState(new long[] { state.timestamps[0], timestamp }, new long[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-LongRateState.bytesUsed(1)); // old state } else { state.append(timestamp, value); } @@ -147,7 +163,9 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, double reset, ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker(LongRateState.bytesUsed(valueCount)); state = new LongRateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -155,11 +173,13 @@ void combine(int groupId, LongBlock timestamps, LongBlock values, double reset, state.values[i] = values.getLong(firstIndex + i); } } else { + adjustBreaker(LongRateState.bytesUsed(state.entries() + valueCount)); var newState = new LongRateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-LongRateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge(LongRateState curr, LongRateState dst, int firstIndex, int rightCount, LongBlock timestamps, LongBlock values) { @@ -191,14 +211,57 @@ void merge(LongRateState curr, LongRateState dst, int firstIndex, int rightCount } } + void combineState(int groupId, LongRateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? 
otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker(LongRateState.bytesUsed(len)); + curr = new LongRateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + LongRateState mergeState(LongRateState s1, LongRateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker(LongRateState.bytesUsed(newLen)); + var dst = new LongRateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java index 608221614c483..8d9e011891e95 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateDoubleGroupingAggregatorFunction.java @@ -49,7 +49,7 @@ public RateDoubleGroupingAggregatorFunction(List<Integer> channels, public static RateDoubleGroupingAggregatorFunction create(List<Integer> channels, DriverContext driverContext, long unitInMillis) { - return new RateDoubleGroupingAggregatorFunction(channels, RateDoubleAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + return new RateDoubleGroupingAggregatorFunction(channels, RateDoubleAggregator.initGrouping(driverContext, unitInMillis), driverContext, unitInMillis); } public static List<IntermediateStateDesc> intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java index df954d92a6d2a..6bd4b833dc9e6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateIntGroupingAggregatorFunction.java @@ -49,7 +49,7 @@ public RateIntGroupingAggregatorFunction(List<Integer> channels, public static RateIntGroupingAggregatorFunction create(List<Integer> channels, DriverContext driverContext, long unitInMillis) { - return new RateIntGroupingAggregatorFunction(channels,
RateIntAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + return new RateIntGroupingAggregatorFunction(channels, RateIntAggregator.initGrouping(driverContext, unitInMillis), driverContext, unitInMillis); } public static List<IntermediateStateDesc> intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java index fb536465ed973..27318d6496737 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/RateLongGroupingAggregatorFunction.java @@ -49,7 +49,7 @@ public RateLongGroupingAggregatorFunction(List<Integer> channels, public static RateLongGroupingAggregatorFunction create(List<Integer> channels, DriverContext driverContext, long unitInMillis) { - return new RateLongGroupingAggregatorFunction(channels, RateLongAggregator.initGrouping(driverContext.bigArrays(), unitInMillis), driverContext, unitInMillis); + return new RateLongGroupingAggregatorFunction(channels, RateLongAggregator.initGrouping(driverContext, unitInMillis), driverContext, unitInMillis); } public static List<IntermediateStateDesc> intermediateStateDesc() { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st index 9ace663fec990..ad305809c6651 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-RateAggregator.java.st @@ -9,6 +9,7 @@ package org.elasticsearch.compute.aggregation; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.compute.ann.GroupingAggregator; @@ -26,6 +27,8 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import java.util.Arrays; + /** * A rate grouping aggregation definition for $type$. * This class is generated. Edit `X-RateAggregator.java.st` instead.
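Reviewer note: the combineState/mergeState additions in the three generated Rate*Aggregator classes above all come from the template below, so the logic only needs review once here. mergeState is a two-pointer merge of two series that are each sorted by timestamp descending. A minimal self-contained sketch of the same logic (hypothetical class and method names, long values only), with a tiny usage check in main:

public final class DescendingMergeDemo {
    // Merge two (timestamp, value) series, each sorted descending by timestamp,
    // into one descending series; mirrors mergeState in the template below.
    static void merge(long[] t1, long[] v1, long[] t2, long[] v2, long[] tOut, long[] vOut) {
        int i = 0, j = 0, k = 0;
        while (i < t1.length && j < t2.length) {
            if (t1[i] > t2[j]) { // newer sample first
                tOut[k] = t1[i];
                vOut[k] = v1[i];
                i++;
            } else {
                tOut[k] = t2[j];
                vOut[k] = v2[j];
                j++;
            }
            k++;
        }
        // at most one of the two sides still has leftovers to copy
        System.arraycopy(t1, i, tOut, k, t1.length - i);
        System.arraycopy(v1, i, vOut, k, t1.length - i);
        System.arraycopy(t2, j, tOut, k, t2.length - j);
        System.arraycopy(v2, j, vOut, k, t2.length - j);
    }

    public static void main(String[] args) {
        long[] tOut = new long[4];
        long[] vOut = new long[4];
        merge(new long[] { 40, 10 }, new long[] { 4, 1 }, new long[] { 30, 20 }, new long[] { 3, 2 }, tOut, vOut);
        System.out.println(java.util.Arrays.toString(tOut)); // [40, 30, 20, 10]
    }
}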
@@ -38,9 +41,9 @@ import org.elasticsearch.core.Releasables; @IntermediateState(name = "resets", type = "DOUBLE") } ) public class Rate$Type$Aggregator { - public static $Type$RateGroupingState initGrouping(BigArrays bigArrays, long unitInMillis) { - // TODO: pass BlockFactory instead bigArrays so we can use the breaker - return new $Type$RateGroupingState(bigArrays, unitInMillis); + + public static $Type$RateGroupingState initGrouping(DriverContext driverContext, long unitInMillis) { + return new $Type$RateGroupingState(driverContext.bigArrays(), driverContext.breaker(), unitInMillis); } public static void combine($Type$RateGroupingState current, int groupId, long timestamp, $type$ value) { @@ -61,17 +64,17 @@ public class Rate$Type$Aggregator { public static void combineStates( $Type$RateGroupingState current, int currentGroupId, // make the stylecheck happy - $Type$RateGroupingState state, - int statePosition + $Type$RateGroupingState otherState, + int otherGroupId ) { - throw new UnsupportedOperationException("ordinals grouping is not supported yet"); + current.combineState(currentGroupId, otherState, otherGroupId); } public static Block evaluateFinal($Type$RateGroupingState state, IntVector selected, DriverContext driverContext) { return state.evaluateFinal(selected, driverContext.blockFactory()); } - private static class $Type$RateState implements Accountable { + private static class $Type$RateState { static final long BASE_RAM_USAGE = RamUsageEstimator.sizeOfObject($Type$RateState.class); final long[] timestamps; // descending order final $type$[] values; @@ -104,9 +107,10 @@ public class Rate$Type$Aggregator { return timestamps.length; } - @Override - public long ramBytesUsed() { - return BASE_RAM_USAGE; + static long bytesUsed(int entries) { + var ts = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) Long.BYTES * entries); + var vs = RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (long) $BYTES$ * entries); + return BASE_RAM_USAGE + ts + vs; } } @@ -114,9 +118,12 @@ public class Rate$Type$Aggregator { private ObjectArray<$Type$RateState> states; private final long unitInMillis; private final BigArrays bigArrays; + private final CircuitBreaker breaker; + private long stateBytes; // for individual states - $Type$RateGroupingState(BigArrays bigArrays, long unitInMillis) { + $Type$RateGroupingState(BigArrays bigArrays, CircuitBreaker breaker, long unitInMillis) { this.bigArrays = bigArrays; + this.breaker = breaker; this.states = bigArrays.newObjectArray(1); this.unitInMillis = unitInMillis; } @@ -125,16 +132,25 @@ public class Rate$Type$Aggregator { states = bigArrays.grow(states, groupId + 1); } + void adjustBreaker(long bytes) { + breaker.addEstimateBytesAndMaybeBreak(bytes, "<<rate aggregation>>"); + stateBytes += bytes; + assert stateBytes >= 0 : stateBytes; + } + void append(int groupId, long timestamp, $type$ value) { ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker($Type$RateState.bytesUsed(1)); state = new $Type$RateState(new long[] { timestamp }, new $type$[] { value }); states.set(groupId, state); } else { if (state.entries() == 1) { + adjustBreaker($Type$RateState.bytesUsed(2)); state = new $Type$RateState(new long[] { state.timestamps[0], timestamp }, new $type$[] { state.values[0], value }); states.set(groupId, state); + adjustBreaker(-$Type$RateState.bytesUsed(1)); // old state } else { state.append(timestamp, value); } @@ -150,7 +166,9 @@ public class Rate$Type$Aggregator {
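// (reviewer note) These combine() hunks also fix a subtle bug: the old code ended with
// `state.reset += reset`, but in the merge branch the local `state` still referenced the
// discarded object after `newState` was stored, so the accumulated reset was lost.
// Seeding `reset` on the freshly created state, and summing it in mergeState, keeps it correct.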
ensureCapacity(groupId); var state = states.get(groupId); if (state == null) { + adjustBreaker($Type$RateState.bytesUsed(valueCount)); state = new $Type$RateState(valueCount); + state.reset = reset; states.set(groupId, state); // TODO: add bulk_copy to Block for (int i = 0; i < valueCount; i++) { @@ -158,11 +176,13 @@ public class Rate$Type$Aggregator { state.values[i] = values.get$Type$(firstIndex + i); } } else { + adjustBreaker($Type$RateState.bytesUsed(state.entries() + valueCount)); var newState = new $Type$RateState(state.entries() + valueCount); + newState.reset = state.reset + reset; states.set(groupId, newState); merge(state, newState, firstIndex, valueCount, timestamps, values); + adjustBreaker(-$Type$RateState.bytesUsed(state.entries())); // old state } - state.reset += reset; } void merge($Type$RateState curr, $Type$RateState dst, int firstIndex, int rightCount, LongBlock timestamps, $Type$Block values) { @@ -194,14 +214,57 @@ public class Rate$Type$Aggregator { } } + void combineState(int groupId, $Type$RateGroupingState otherState, int otherGroupId) { + var other = otherGroupId < otherState.states.size() ? otherState.states.get(otherGroupId) : null; + if (other == null) { + return; + } + ensureCapacity(groupId); + var curr = states.get(groupId); + if (curr == null) { + var len = other.entries(); + adjustBreaker($Type$RateState.bytesUsed(len)); + curr = new $Type$RateState(Arrays.copyOf(other.timestamps, len), Arrays.copyOf(other.values, len)); + curr.reset = other.reset; + states.set(groupId, curr); + } else { + states.set(groupId, mergeState(curr, other)); + } + } + + $Type$RateState mergeState($Type$RateState s1, $Type$RateState s2) { + var newLen = s1.entries() + s2.entries(); + adjustBreaker($Type$RateState.bytesUsed(newLen)); + var dst = new $Type$RateState(newLen); + dst.reset = s1.reset + s2.reset; + int i = 0, j = 0, k = 0; + while (i < s1.entries() && j < s2.entries()) { + if (s1.timestamps[i] > s2.timestamps[j]) { + dst.timestamps[k] = s1.timestamps[i]; + dst.values[k] = s1.values[i]; + ++i; + } else { + dst.timestamps[k] = s2.timestamps[j]; + dst.values[k] = s2.values[j]; + ++j; + } + ++k; + } + System.arraycopy(s1.timestamps, i, dst.timestamps, k, s1.entries() - i); + System.arraycopy(s1.values, i, dst.values, k, s1.entries() - i); + System.arraycopy(s2.timestamps, j, dst.timestamps, k, s2.entries() - j); + System.arraycopy(s2.values, j, dst.values, k, s2.entries() - j); + return dst; + } + @Override public long ramBytesUsed() { - return states.ramBytesUsed(); + return states.ramBytesUsed() + stateBytes; } @Override public void close() { - Releasables.close(states); + Releasables.close(states, () -> adjustBreaker(-stateBytes)); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java index ad884538ac85f..855066fcb9da5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -143,14 +143,11 @@ public Page getOutput() { } iterator.consume(); shard = blockFactory.newConstantIntBlockWith(iterator.slice.shardContext().index(), currentPagePos); - boolean singleSegmentNonDecreasing; if (iterator.slice.numLeaves() == 1) { - singleSegmentNonDecreasing = true; int segmentOrd = 
iterator.slice.getLeaf(0).leafReaderContext().ord; leaf = blockFactory.newConstantIntBlockWith(segmentOrd, currentPagePos).asVector(); } else { // Due to the multi segment nature of time series source operator singleSegmentNonDecreasing must be false - singleSegmentNonDecreasing = false; leaf = segmentsBuilder.build(); segmentsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); } @@ -161,10 +158,9 @@ public Page getOutput() { timestampIntervalBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); tsids = tsOrdBuilder.build(); tsOrdBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); - page = new Page( currentPagePos, - new DocVector(shard.asVector(), leaf, docs, singleSegmentNonDecreasing).asBlock(), + new DocVector(shard.asVector(), leaf, docs, leaf.isConstant()).asBlock(), tsids.asBlock(), timestampIntervals.asBlock() ); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index 16340909a4fd3..b397d36837d01 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -43,14 +43,17 @@ import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OperatorTestCase; +import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.BlockDocValuesReader; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.junit.After; @@ -285,17 +288,6 @@ record Doc(String pod, long timestamp, long requests) { return docs.size(); }); var ctx = driverContext(); - HashAggregationOperator initialHash = new HashAggregationOperator( - List.of(new RateLongAggregatorFunctionSupplier(List.of(4, 2), unitInMillis).groupingAggregatorFactory(AggregatorMode.INITIAL)), - () -> BlockHash.build( - List.of(new HashAggregationOperator.GroupSpec(3, ElementType.BYTES_REF)), - ctx.blockFactory(), - randomIntBetween(1, 1000), - randomBoolean() - ), - ctx - ); - HashAggregationOperator finalHash = new HashAggregationOperator( List.of(new RateLongAggregatorFunctionSupplier(List.of(1, 2, 3), unitInMillis).groupingAggregatorFactory(AggregatorMode.FINAL)), () -> BlockHash.build( @@ -309,20 +301,62 @@ record Doc(String pod, long timestamp, long requests) { List results = new ArrayList<>(); var requestsField = new NumberFieldMapper.NumberFieldType("requests", NumberFieldMapper.NumberType.LONG); var podField = new KeywordFieldMapper.KeywordFieldType("pod"); - OperatorTestCase.runDriver( - new Driver( - ctx, - sourceOperatorFactory.get(ctx), + if (randomBoolean()) { + HashAggregationOperator initialHash = new HashAggregationOperator( 
List.of( - ValuesSourceReaderOperatorTests.factory(reader, podField, ElementType.BYTES_REF).get(ctx), - ValuesSourceReaderOperatorTests.factory(reader, requestsField, ElementType.LONG).get(ctx), - initialHash, - finalHash + new RateLongAggregatorFunctionSupplier(List.of(4, 2), unitInMillis).groupingAggregatorFactory(AggregatorMode.INITIAL) ), - new TestResultPageSinkOperator(results::add), - () -> {} - ) - ); + () -> BlockHash.build( + List.of(new HashAggregationOperator.GroupSpec(3, ElementType.BYTES_REF)), + ctx.blockFactory(), + randomIntBetween(1, 1000), + randomBoolean() + ), + ctx + ); + OperatorTestCase.runDriver( + new Driver( + ctx, + sourceOperatorFactory.get(ctx), + List.of( + ValuesSourceReaderOperatorTests.factory(reader, podField, ElementType.BYTES_REF).get(ctx), + ValuesSourceReaderOperatorTests.factory(reader, requestsField, ElementType.LONG).get(ctx), + initialHash, + finalHash + ), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + } else { + var blockLoader = new BlockDocValuesReader.BytesRefsFromOrdsBlockLoader("pod"); + var shardContext = new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE); + var ordinalGrouping = new OrdinalsGroupingOperator( + shardIdx -> blockLoader, + List.of(shardContext), + ElementType.BYTES_REF, + 0, + "pod", + List.of( + new RateLongAggregatorFunctionSupplier(List.of(3, 2), unitInMillis).groupingAggregatorFactory(AggregatorMode.INITIAL) + ), + randomIntBetween(1, 1000), + ctx + ); + OperatorTestCase.runDriver( + new Driver( + ctx, + sourceOperatorFactory.get(ctx), + List.of( + ValuesSourceReaderOperatorTests.factory(reader, requestsField, ElementType.LONG).get(ctx), + ordinalGrouping, + finalHash + ), + new TestResultPageSinkOperator(results::add), + () -> {} + ) + ); + } Map rates = new HashMap<>(); for (Page result : results) { BytesRefBlock keysBlock = result.getBlock(0); diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index bb8163915c1c4..2dd64cf02446b 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql; import org.apache.http.HttpStatus; +import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -31,6 +32,9 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class EsqlSecurityIT extends ESRestTestCase { @@ -47,6 +51,7 @@ public class EsqlSecurityIT extends ESRestTestCase { .user("user3", "x-pack-test-password", "user3", false) .user("user4", "x-pack-test-password", "user4", false) .user("user5", "x-pack-test-password", "user5", false) + .user("fls_user", "x-pack-test-password", "fls_user", false) .build(); @Override @@ -62,7 +67,11 @@ protected Settings restClientSettings() { private void indexDocument(String index, int id, double value, String org) throws IOException { Request indexDoc = new Request("PUT", index + "/_doc/" + id); - indexDoc.setJsonEntity("{\"value\":" + 
value + ",\"org\":\"" + org + "\"}"); + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + builder.field("value", value); + builder.field("org", org); + builder.field("partial", org + value); + indexDoc.setJsonEntity(Strings.toString(builder.endObject())); client().performRequest(indexDoc); } @@ -85,6 +94,11 @@ public void indexDocuments() throws IOException { indexDocument("index-user2", 1, 32.0, "marketing"); indexDocument("index-user2", 2, 40.0, "sales"); refresh("index-user2"); + + createIndex("indexpartial", Settings.EMPTY, mapping); + indexDocument("indexpartial", 1, 32.0, "marketing"); + indexDocument("indexpartial", 2, 40.0, "sales"); + refresh("indexpartial"); } public void testAllowedIndices() throws Exception { @@ -122,7 +136,7 @@ public void testUnauthorizedIndices() throws IOException { assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(400)); } - public void testDLS() throws Exception { + public void testDocumentLevelSecurity() throws Exception { Response resp = runESQLCommand("user3", "from index | stats sum=sum(value)"); assertOK(resp); Map respMap = entityAsMap(resp); @@ -130,6 +144,69 @@ public void testDLS() throws Exception { assertThat(respMap.get("values"), equalTo(List.of(List.of(10.0)))); } + public void testFieldLevelSecurityAllow() throws Exception { + Response resp = runESQLCommand("fls_user", "FROM index* | SORT value | LIMIT 1"); + assertOK(resp); + assertMap( + entityAsMap(resp), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "partial").entry("type", "text"), + matchesMap().entry("name", "value").entry("type", "double") + ) + ) + .entry("values", List.of(List.of("sales10.0", 10.0))) + ); + } + + public void testFieldLevelSecurityAllowPartial() throws Exception { + Request request = new Request("GET", "/index*/_field_caps"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", "fls_user")); + request.addParameter("error_trace", "true"); + request.addParameter("pretty", "true"); + request.addParameter("fields", "*"); + + request = new Request("GET", "/index*/_search"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", "fls_user")); + request.addParameter("error_trace", "true"); + request.addParameter("pretty", "true"); + + Response resp = runESQLCommand("fls_user", "FROM index* | SORT partial | LIMIT 1"); + assertOK(resp); + assertMap( + entityAsMap(resp), + matchesMap().extraOk() + .entry( + "columns", + List.of( + matchesMap().entry("name", "partial").entry("type", "text"), + matchesMap().entry("name", "value").entry("type", "double") + ) + ) + .entry("values", List.of(List.of("engineering20.0", 20.0))) + ); + } + + public void testFieldLevelSecuritySpellingMistake() throws Exception { + ResponseException e = expectThrows( + ResponseException.class, + () -> runESQLCommand("fls_user", "FROM index* | SORT parial | LIMIT 1") + ); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("Unknown column [parial]")); + } + + public void testFieldLevelSecurityNotAllowed() throws Exception { + ResponseException e = expectThrows( + ResponseException.class, + () -> runESQLCommand("fls_user", "FROM index* | SORT org DESC | LIMIT 1") + ); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(400)); + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("Unknown column 
[org]")); + } + public void testRowCommand() throws Exception { String user = randomFrom("test-admin", "user1", "user2"); Response resp = runESQLCommand(user, "row a = 5, b = 2 | stats count=sum(b) by a"); @@ -283,6 +360,7 @@ protected Response runESQLCommand(String user, String command) throws IOExceptio Request request = new Request("POST", "_query"); request.setJsonEntity(Strings.toString(json)); request.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("es-security-runas-user", user)); + request.addParameter("error_trace", "true"); return client().performRequest(request); } diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml index 7a89fa57f7102..7d134103afd28 100644 --- a/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml @@ -51,9 +51,22 @@ user4: - names: ['index-user1', 'index', "test-enrich" ] privileges: - read + user5: cluster: [] indices: - names: ['index-user1', 'index', "test-enrich" ] privileges: - read + +fls_user: + cluster: [] + indices: + - names: [ 'index' ] + privileges: [ 'read' ] + field_security: + grant: [ value, partial ] + - names: [ 'indexpartial' ] + privileges: [ 'read' ] + field_security: + grant: [ value ] diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java index 4f4091873fba9..3511cbda1841b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/response/XContentUtils.java @@ -15,6 +15,13 @@ public class XContentUtils { + /** + * Moves to the first valid token, which is non-null. + * Does not move, if the parser is already positioned at a valid token. 
+ * + * @param parser parser to move + * @throws IOException if underlying parser methods throw + */ public static void moveToFirstToken(XContentParser parser) throws IOException { if (parser.currentToken() == null) { parser.nextToken(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java index 1e2353f901705..bafe1b031b028 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceFields.java @@ -11,4 +11,6 @@ public class OpenAiServiceFields { public static final String USER = "user"; + public static final String ORGANIZATION = "organization_id"; + } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java index 0150d75b7037e..16b0ed5d47039 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettings.java @@ -31,6 +31,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.ORGANIZATION; /** * Defines the service settings for interacting with OpenAI's chat completion models. 
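For context on the XContentUtils change above: moveToFirstToken is idempotent, so callers can invoke it defensively before reading a response body. A minimal caller sketch, assuming an invented payload and demo method; only moveToFirstToken itself comes from this diff, while createParser and XContentParserConfiguration.EMPTY are existing xcontent API:

    // Hypothetical usage sketch, not part of the change.
    static void demo() throws IOException {
        String json = "{\"score\":0.42}"; // invented payload
        try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) {
            XContentUtils.moveToFirstToken(parser); // current token is null: advances to START_OBJECT
            XContentUtils.moveToFirstToken(parser); // already positioned: does nothing
            assert parser.currentToken() == XContentParser.Token.START_OBJECT;
        }
    }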
@@ -39,8 +40,6 @@ public class OpenAiChatCompletionServiceSettings implements ServiceSettings { public static final String NAME = "openai_completion_service_settings"; - static final String ORGANIZATION = "organization_id"; - public static OpenAiChatCompletionServiceSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java index fb10d959087de..2d5a407f3c1a6 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettings.java @@ -22,13 +22,12 @@ import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; public class OpenAiChatCompletionTaskSettings implements TaskSettings { public static final String NAME = "openai_completion_task_settings"; - public static final String USER = "user"; - public static OpenAiChatCompletionTaskSettings fromMap(Map map) { ValidationException validationException = new ValidationException(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java index 5bdb0d7542a83..373704af37fcd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettings.java @@ -16,7 +16,7 @@ import java.util.Map; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; -import static org.elasticsearch.xpack.inference.services.openai.embeddings.OpenAiEmbeddingsTaskSettings.USER; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; /** * This class handles extracting OpenAI task settings from a request. 
The difference between this class and diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java index 34713ff2b7208..01aa4f51799fb 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java @@ -37,6 +37,7 @@ import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractRequiredString; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractSimilarity; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeAsType; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.ORGANIZATION; /** * Defines the service settings for interacting with OpenAI's text embedding models. @@ -45,7 +46,6 @@ public class OpenAiEmbeddingsServiceSettings implements ServiceSettings { public static final String NAME = "openai_service_settings"; - static final String ORGANIZATION = "organization_id"; static final String DIMENSIONS_SET_BY_USER = "dimensions_set_by_user"; public static OpenAiEmbeddingsServiceSettings fromMap(Map map, ConfigurationParseContext context) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java index a7b11487ca72f..e306f2d3d2928 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java @@ -23,6 +23,7 @@ import java.util.Objects; import static org.elasticsearch.xpack.inference.services.ServiceUtils.extractOptionalString; +import static org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields.USER; /** * Defines the task settings for the openai service. @@ -33,7 +34,6 @@ public class OpenAiEmbeddingsTaskSettings implements TaskSettings { public static final String NAME = "openai_embeddings_task_settings"; - public static final String USER = "user"; public static OpenAiEmbeddingsTaskSettings fromMap(Map map, ConfigurationParseContext context) { ValidationException validationException = new ValidationException(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java new file mode 100644 index 0000000000000..c8de0371ab196 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/response/XContentUtilsTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.external.response; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.Locale; + +public class XContentUtilsTests extends ESTestCase { + + public void testMoveToFirstToken() throws IOException { + var json = """ + { + "key": "value" + } + """; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + assertNull(parser.currentToken()); + + XContentUtils.moveToFirstToken(parser); + + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + } + } + + public void testMoveToFirstToken_DoesNotMoveIfAlreadyAtAToken() throws IOException { + var json = """ + { + "key": "value" + } + """; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + // position at a valid token + parser.nextToken(); + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + + XContentUtils.moveToFirstToken(parser); + + // still at the beginning of the object + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + } + } + + public void testPositionParserAtTokenAfterField() throws IOException { + var json = """ + { + "key": "value" + } + """; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + XContentUtils.positionParserAtTokenAfterField(parser, "key", "some error"); + + assertEquals("value", parser.text()); + } + } + + public void testPositionParserAtTokenAfterField_ThrowsIfFieldIsMissing() throws IOException { + var json = """ + { + "key": "value" + } + """; + var errorFormat = "Error: %s"; + var missingField = "missing field"; + + try (XContentParser parser = createParser(XContentType.JSON.xContent(), json)) { + var exception = expectThrows( + IllegalStateException.class, + () -> XContentUtils.positionParserAtTokenAfterField(parser, missingField, errorFormat) + ); + + assertEquals(String.format(Locale.ROOT, errorFormat, missingField), exception.getMessage()); + } + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java index 24632e120f94b..6fbdd3bf622d3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionRequestTaskSettingsTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.util.HashMap; import java.util.Map; @@ -28,9 +29,7 @@ public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() } public void testFromMap_ReturnsUser() { - var settings = OpenAiChatCompletionRequestTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user")) - ); + var settings = OpenAiChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); assertThat(settings.user(), is("user")); } @@ -38,7 +37,7 @@ public static Map getChatCompletionRequestTaskSettingsMap(@Nulla var map = new 
HashMap(); if (user != null) { - map.put(OpenAiChatCompletionTaskSettings.USER, user); + map.put(OpenAiServiceFields.USER, user); } return map; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java index 8778b2f13e746..ba2460f7bc09a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionServiceSettingsTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.io.IOException; import java.net.URI; @@ -40,7 +41,7 @@ public void testFromMap_Request_CreatesSettingsCorrectly() { modelId, ServiceFields.URL, url, - OpenAiChatCompletionServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.MAX_INPUT_TOKENS, maxInputTokens @@ -61,7 +62,7 @@ public void testFromMap_MissingUrl_DoesNotThrowException() { Map.of( ServiceFields.MODEL_ID, modelId, - OpenAiChatCompletionServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, organization, ServiceFields.MAX_INPUT_TOKENS, maxInputTokens @@ -109,7 +110,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { var thrownException = expectThrows( ValidationException.class, () -> OpenAiChatCompletionServiceSettings.fromMap( - new HashMap<>(Map.of(OpenAiChatCompletionServiceSettings.ORGANIZATION, "", ServiceFields.MODEL_ID, "model")) + new HashMap<>(Map.of(OpenAiServiceFields.ORGANIZATION, "", ServiceFields.MODEL_ID, "model")) ) ); @@ -118,7 +119,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { containsString( org.elasticsearch.common.Strings.format( "Validation Failed: 1: [service_settings] Invalid value empty string. 
[%s] must be a non-empty string;", - OpenAiChatCompletionServiceSettings.ORGANIZATION + OpenAiServiceFields.ORGANIZATION ) ) ); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java index 66a9ec371eb93..f2bd26a4e6432 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/completion/OpenAiChatCompletionTaskSettingsTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.io.IOException; import java.util.HashMap; @@ -27,14 +28,14 @@ public static OpenAiChatCompletionTaskSettings createRandomWithUser() { public void testFromMap_WithUser() { assertEquals( new OpenAiChatCompletionTaskSettings("user"), - OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))) + OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))) ); } public void testFromMap_UserIsEmptyString() { var thrownException = expectThrows( ValidationException.class, - () -> OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, ""))) + () -> OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, ""))) ); assertThat( @@ -49,7 +50,7 @@ public void testFromMap_MissingUser_DoesNotThrowException() { } public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { - var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))); + var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); var overriddenTaskSettings = OpenAiChatCompletionTaskSettings.of( taskSettings, @@ -59,11 +60,9 @@ public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { } public void testOverrideWith_UsesOverriddenSettings() { - var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user"))); + var taskSettings = OpenAiChatCompletionTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); - var requestTaskSettings = OpenAiChatCompletionRequestTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiChatCompletionTaskSettings.USER, "user2")) - ); + var requestTaskSettings = OpenAiChatCompletionRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user2"))); var overriddenTaskSettings = OpenAiChatCompletionTaskSettings.of(taskSettings, requestTaskSettings); assertThat(overriddenTaskSettings, is(new OpenAiChatCompletionTaskSettings("user2"))); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java index 5a39fcb61ff0a..c95853e2d0128 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsRequestTaskSettingsTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import java.util.HashMap; import java.util.Map; @@ -27,7 +28,7 @@ public void testFromMap_ReturnsEmptySettings_WhenTheMapDoesNotContainTheFields() } public void testFromMap_ReturnsUser() { - var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user"))); + var settings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user"))); assertThat(settings.user(), is("user")); } @@ -35,7 +36,7 @@ public static Map getRequestTaskSettingsMap(@Nullable String use var map = new HashMap(); if (user != null) { - map.put(OpenAiEmbeddingsTaskSettings.USER, user); + map.put(OpenAiServiceFields.USER, user); } return map; diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java index 00cea6dc6ed21..e37318a0c96d4 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettingsTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.ServiceUtils; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import org.hamcrest.CoreMatchers; import java.io.IOException; @@ -79,7 +80,7 @@ public void testFromMap_Request_CreatesSettingsCorrectly() { modelId, ServiceFields.URL, url, - OpenAiEmbeddingsServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.SIMILARITY, similarity, @@ -121,7 +122,7 @@ public void testFromMap_Request_DimensionsSetByUser_IsFalse_WhenDimensionsAreNot modelId, ServiceFields.URL, url, - OpenAiEmbeddingsServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.SIMILARITY, similarity, @@ -162,7 +163,7 @@ public void testFromMap_Persistent_CreatesSettingsCorrectly() { modelId, ServiceFields.URL, url, - OpenAiEmbeddingsServiceSettings.ORGANIZATION, + OpenAiServiceFields.ORGANIZATION, org, ServiceFields.SIMILARITY, similarity, @@ -219,7 +220,7 @@ public void testFromMap_PersistentContext_ThrowsException_WhenDimensionsSetByUse public void testFromMap_MissingUrl_DoesNotThrowException() { var serviceSettings = OpenAiEmbeddingsServiceSettings.fromMap( - new HashMap<>(Map.of(ServiceFields.MODEL_ID, "m", OpenAiEmbeddingsServiceSettings.ORGANIZATION, "org")), + new HashMap<>(Map.of(ServiceFields.MODEL_ID, "m", OpenAiServiceFields.ORGANIZATION, "org")), ConfigurationParseContext.REQUEST ); assertNull(serviceSettings.uri()); @@ -260,7 +261,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { var thrownException = expectThrows( 
ValidationException.class, () -> OpenAiEmbeddingsServiceSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsServiceSettings.ORGANIZATION, "", ServiceFields.MODEL_ID, "m")), + new HashMap<>(Map.of(OpenAiServiceFields.ORGANIZATION, "", ServiceFields.MODEL_ID, "m")), ConfigurationParseContext.REQUEST ) ); @@ -270,7 +271,7 @@ public void testFromMap_EmptyOrganization_ThrowsError() { containsString( Strings.format( "Validation Failed: 1: [service_settings] Invalid value empty string. [%s] must be a non-empty string;", - OpenAiEmbeddingsServiceSettings.ORGANIZATION + OpenAiServiceFields.ORGANIZATION ) ) ); @@ -375,7 +376,7 @@ public static Map getServiceSettingsMap(String modelId, @Nullabl } if (org != null) { - map.put(OpenAiEmbeddingsServiceSettings.ORGANIZATION, org); + map.put(OpenAiServiceFields.ORGANIZATION, org); } return map; } @@ -395,7 +396,7 @@ public static Map getServiceSettingsMap( } if (org != null) { - map.put(OpenAiEmbeddingsServiceSettings.ORGANIZATION, org); + map.put(OpenAiServiceFields.ORGANIZATION, org); } if (dimensions != null) { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java index 6448b66d11cf3..c5a510ef9de0c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettingsTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; +import org.elasticsearch.xpack.inference.services.openai.OpenAiServiceFields; import org.hamcrest.MatcherAssert; import java.io.IOException; @@ -38,10 +39,7 @@ public static OpenAiEmbeddingsTaskSettings createRandom() { public void testFromMap_WithUser() { assertEquals( new OpenAiEmbeddingsTaskSettings("user"), - OpenAiEmbeddingsTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), - ConfigurationParseContext.REQUEST - ) + OpenAiEmbeddingsTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user")), ConfigurationParseContext.REQUEST) ); } @@ -49,7 +47,7 @@ public void testFromMap_UserIsEmptyString() { var thrownException = expectThrows( ValidationException.class, () -> OpenAiEmbeddingsTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "")), + new HashMap<>(Map.of(OpenAiServiceFields.USER, "")), ConfigurationParseContext.REQUEST ) ); @@ -67,7 +65,7 @@ public void testFromMap_MissingUser_DoesNotThrowException() { public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), + new HashMap<>(Map.of(OpenAiServiceFields.USER, "user")), ConfigurationParseContext.PERSISTENT ); @@ -77,13 +75,11 @@ public void testOverrideWith_KeepsOriginalValuesWithOverridesAreNull() { public void testOverrideWith_UsesOverriddenSettings() { var taskSettings = OpenAiEmbeddingsTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user")), + new HashMap<>(Map.of(OpenAiServiceFields.USER, "user")), 
ConfigurationParseContext.PERSISTENT ); - var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap( - new HashMap<>(Map.of(OpenAiEmbeddingsTaskSettings.USER, "user2")) - ); + var requestTaskSettings = OpenAiEmbeddingsRequestTaskSettings.fromMap(new HashMap<>(Map.of(OpenAiServiceFields.USER, "user2"))); var overriddenTaskSettings = OpenAiEmbeddingsTaskSettings.of(taskSettings, requestTaskSettings); MatcherAssert.assertThat(overriddenTaskSettings, is(new OpenAiEmbeddingsTaskSettings("user2"))); @@ -108,7 +104,7 @@ public static Map getTaskSettingsMap(@Nullable String user) { var map = new HashMap(); if (user != null) { - map.put(OpenAiEmbeddingsTaskSettings.USER, user); + map.put(OpenAiServiceFields.USER, user); } return map; diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 83be62beab4ec..591b20bd82f47 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.search.ClosePointInTimeRequest; @@ -48,6 +49,7 @@ import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.test.InternalSettingsPlugin; @@ -68,6 +70,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; @@ -77,6 +80,8 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCountAndNoFailures; @@ -137,6 +142,9 @@ protected String configUsers() { + "\n" + "user9:" + usersPasswHashed + + "\n" + + "user_different_fields:" + + usersPasswHashed + "\n"; } @@ -150,7 +158,8 @@ protected String configUsersRoles() { role5:user4,user7 role6:user5,user7 role7:user6 - role8:user9"""; + role8:user9 + role_different_fields:user_different_fields"""; } @Override @@ -213,6 +222,16 @@ protected String configRoles() { privileges: [ ALL ] field_security: grant: [ 'field*', 'query' ] + role_different_fields: + indices: + - names: [ 'partial1*' ] + privileges: [ 'read' ] + field_security: + grant: [ value, partial ] + - names: [ 'partial2*' ] + privileges: [ 
'read' ] + field_security: + grant: [ value ] """; } @@ -2336,4 +2355,49 @@ public void testLookupRuntimeFields() throws Exception { ); } + public void testSearchDifferentFieldsVisible() { + String firstName = "partial1" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + String secondName = "partial2" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + indexPartial(firstName, secondName); + SearchResponse response = client().filterWithHeader( + Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_different_fields", USERS_PASSWD)) + ).prepareSearch("partial*").addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC)).get(); + try { + assertMap(response.getHits().getAt(0).getSourceAsMap(), matchesMap().entry("value", 1).entry("partial", 2)); + assertMap(response.getHits().getAt(1).getSourceAsMap(), matchesMap().entry("value", 2)); + } finally { + response.decRef(); + } + } + + /** + * The field {@code partial} is only visible in one of the two backing indices and field caps should show it. + */ + public void testFieldCapsDifferentFieldsVisible() { + String firstName = "partial1_" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + String secondName = "partial2_" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + indexPartial(firstName, secondName); + FieldCapabilitiesResponse response = client().filterWithHeader( + Map.of(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_different_fields", USERS_PASSWD)) + ).prepareFieldCaps("partial*").setFields("value", "partial").get(); + try { + assertThat(response.get().keySet(), equalTo(Set.of("value", "partial"))); + assertThat(response.getField("value").keySet(), equalTo(Set.of("long"))); + assertThat(response.getField("partial").keySet(), equalTo(Set.of("long"))); + } finally { + response.decRef(); + } + } + + private void indexPartial(String firstName, String secondName) { + BulkResponse bulkResponse = client().prepareBulk() + .add(client().prepareIndex(firstName).setSource("value", 1, "partial", 2)) + .add(client().prepareIndex(secondName).setSource("value", 2, "partial", 3)) + .setRefreshPolicy(IMMEDIATE) + .get(); + for (var i : bulkResponse.getItems()) { + assertThat(i.getFailure(), nullValue()); + assertThat(i.status(), equalTo(RestStatus.CREATED)); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index ae6df838b4eac..5736d3e550f01 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -74,6 +74,7 @@ import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.NetworkPlugin; @@ -1947,29 +1948,29 @@ public UnaryOperator<Map<String, IndexTemplateMetadata>> getIndexTemplateMetadat } @Override - public Function<String, Predicate<String>> getFieldFilter() { + public Function<String, FieldPredicate> getFieldFilter() { if (enabled) { return index -> { XPackLicenseState licenseState = getLicenseState(); IndicesAccessControl indicesAccessControl = threadContext.get() .getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); if (indicesAccessControl == null) { - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } assert
indicesAccessControl.isGranted(); IndicesAccessControl.IndexAccessControl indexPermissions = indicesAccessControl.getIndexPermissions(index); if (indexPermissions == null) { - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } FieldPermissions fieldPermissions = indexPermissions.getFieldPermissions(); if (fieldPermissions.hasFieldLevelSecurity() == false) { - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } if (FIELD_LEVEL_SECURITY_FEATURE.checkWithoutTracking(licenseState) == false) { // check license last, once we know FLS is actually used - return MapperPlugin.NOOP_FIELD_PREDICATE; + return FieldPredicate.ACCEPT_ALL; } - return fieldPermissions::grantsAccessTo; + return fieldPermissions.fieldPredicate(); }; } return MapperPlugin.super.getFieldFilter(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 6a869377d7b07..f575bb6adc50e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.internal.XPackLicenseStatus; import org.elasticsearch.plugins.ExtensiblePlugin; +import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.internal.RestExtension; import org.elasticsearch.rest.RestHandler; @@ -120,7 +121,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.function.Predicate; import java.util.stream.Collectors; import static java.util.Collections.emptyMap; @@ -469,7 +469,7 @@ public void testJoinValidatorForFIPSOnForbiddenLicense() throws Exception { public void testGetFieldFilterSecurityEnabled() throws Exception { createComponents(Settings.EMPTY); - Function<String, Predicate<String>> fieldFilter = security.getFieldFilter(); + Function<String, FieldPredicate> fieldFilter = security.getFieldFilter(); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); Map permissionsMap = new HashMap<>(); @@ -491,9 +491,9 @@ public void testGetFieldFilterSecurityEnabled() throws Exception { assertThat(fieldFilter.apply("index_granted"), trueWith("field_granted")); assertThat(fieldFilter.apply("index_granted"), falseWith(randomAlphaOfLengthBetween(3, 10))); - assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("index_granted_all_permissions")); + assertEquals(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("index_granted_all_permissions")); assertThat(fieldFilter.apply("index_granted_all_permissions"), trueWith(randomAlphaOfLengthBetween(3, 10))); - assertEquals(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply("index_other")); + assertEquals(FieldPredicate.ACCEPT_ALL, fieldFilter.apply("index_other")); } public void testGetFieldFilterSecurityDisabled() throws Exception { @@ -503,7 +503,7 @@ public void testGetFieldFilterSecurityDisabled() throws Exception { public void testGetFieldFilterSecurityEnabledLicenseNoFLS() throws Exception { createComponents(Settings.EMPTY); - Function<String, Predicate<String>> fieldFilter = security.getFieldFilter(); + Function<String, FieldPredicate> fieldFilter = security.getFieldFilter(); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); licenseState.update( new XPackLicenseStatus( @@ -513,7 +513,7 @@ public void
testGetFieldFilterSecurityEnabledLicenseNoFLS() throws Exception { ) ); assertNotSame(MapperPlugin.NOOP_FIELD_FILTER, fieldFilter); - assertSame(MapperPlugin.NOOP_FIELD_PREDICATE, fieldFilter.apply(randomAlphaOfLengthBetween(3, 6))); + assertSame(FieldPredicate.ACCEPT_ALL, fieldFilter.apply(randomAlphaOfLengthBetween(3, 6))); } public void testValidateRealmsWhenSettingsAreInvalid() { diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java index 18020bd44ca6e..c178b20530f0c 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/runtime/AbstractGeoShapeScriptFieldQuery.java @@ -22,7 +22,7 @@ abstract class AbstractGeoShapeScriptFieldQuery extends AbstractScriptFieldQuery } @Override - protected boolean matches(GeometryFieldScript scriptContext, int docId) { + protected final boolean matches(GeometryFieldScript scriptContext, int docId) { scriptContext.runForDoc(docId); return matches(scriptContext.geometry()); } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java index 331bfbf8cd305..592cb65800b71 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeScriptFieldTypeTests.java @@ -99,8 +99,8 @@ protected ScriptFactory dummyScript() { @Override public void testDocValues() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); List results = new ArrayList<>(); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); @@ -128,6 +128,7 @@ public void collect(int doc) throws IOException { }; } }); + assertEquals(2, results.size()); } } } @@ -141,7 +142,7 @@ public void testSort() throws IOException { public void testFetch() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef(""" + addDocument(iw, List.of(new StoredField("_source", new BytesRef(""" {"foo": {"coordinates": [[45.0, 45.0], [0.0, 0.0]], "type" : "LineString"}}""")))); try (DirectoryReader reader = iw.getReader()) { SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -162,8 +163,8 @@ public void testFetch() throws IOException { @Override public void testUsedInScript() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new 
RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); SearchExecutionContext searchContext = mockContext(true, simpleMappedFieldType()); @@ -196,8 +197,8 @@ public double execute(ExplanationHolder explanation) { @Override public void testExistsQuery() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); - iw.addDocument(List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(0.0 0.0, 1.0 1.0)\" }")))); + addDocument(iw, List.of(new StoredField("_source", new BytesRef("{\"foo\": \"LINESTRING(45.0 45.0, 3.0 3.0)\" }")))); try (DirectoryReader reader = iw.getReader()) { IndexSearcher searcher = newSearcher(reader); assertThat(searcher.count(simpleMappedFieldType().existsQuery(mockContext())), equalTo(2)); diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 254d12a05d936..d7760eb42a1db 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -731,7 +731,7 @@ private void assertBasicWatchInteractions() throws Exception { Map updateWatch = entityAsMap(client().performRequest(createWatchRequest)); assertThat(updateWatch.get("created"), equalTo(false)); - assertThat(updateWatch.get("_version"), equalTo(2)); + assertThat((int) updateWatch.get("_version"), greaterThanOrEqualTo(2)); Map get = entityAsMap(client().performRequest(new Request("GET", "_watcher/watch/new_watch"))); assertThat(get.get("found"), equalTo(true));
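To summarize the memory-accounting pattern introduced in Rate$Type$Aggregator above: bytes are reserved against the CircuitBreaker before a replacement state is allocated, the superseded state's estimate is released only after the swap, and the running total is handed back on close. A minimal standalone sketch of that discipline, assuming an invented TrackedBuffer class (CircuitBreaker with addEstimateBytesAndMaybeBreak/addWithoutBreaking is real Elasticsearch API; everything else is illustrative):

    import org.elasticsearch.common.breaker.CircuitBreaker;
    import java.util.Arrays;

    // Sketch: grow a long[] while keeping a CircuitBreaker informed, mirroring
    // adjustBreaker(...) in the aggregator. Reserve first, swap, then release.
    final class TrackedBuffer implements AutoCloseable {
        private final CircuitBreaker breaker;
        private long[] values = new long[0];

        TrackedBuffer(CircuitBreaker breaker) {
            this.breaker = breaker;
        }

        void grow(int newLen) {
            long newBytes = (long) Long.BYTES * newLen;
            // May throw CircuitBreakingException before any allocation happens,
            // so untracked memory is never committed.
            breaker.addEstimateBytesAndMaybeBreak(newBytes, "tracked_buffer");
            long oldBytes = (long) Long.BYTES * values.length;
            values = Arrays.copyOf(values, newLen);
            // The old array is garbage now; give its reservation back.
            breaker.addWithoutBreaking(-oldBytes);
        }

        @Override
        public void close() {
            // Mirrors Releasables.close(states, () -> adjustBreaker(-stateBytes)).
            breaker.addWithoutBreaking(-(long) Long.BYTES * values.length);
        }
    }

The same reserve-before-allocate ordering explains why append() above charges bytesUsed(2) for the grown state before constructing it and only afterwards refunds bytesUsed(1) for the state it replaces.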