diff --git a/.backportrc.json b/.backportrc.json index 77b06cd41927..d2e92817c026 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,9 +1,10 @@ { "upstream" : "elastic/elasticsearch", - "targetBranchChoices" : [ "main", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetBranchChoices" : [ "main", "8.x", "8.15", "8.14", "8.13", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], "targetPRLabels" : [ "backport" ], "branchLabelMapping" : { - "^v8.16.0$" : "main", + "^v9.0.0$" : "main", + "^v8.16.0$" : "8.x", "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } -} \ No newline at end of file +} diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index beb45107bc31..e7ba4ba7610c 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -62,7 +62,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] + BWC_VERSION: ["8.15.2", "8.16.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index cd0bc8449f89..8ef8f5954887 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -33,312 +33,6 @@ steps: env: {} - group: packaging-tests-upgrade steps: - - label: "{{matrix.image}} / 7.0.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.0.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.0.1 - - - label: "{{matrix.image}} / 7.1.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.1.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.1.1 - - - label: "{{matrix.image}} / 7.2.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.2.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.2.1 - - - label: "{{matrix.image}} / 7.3.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.3.2 - - - label: "{{matrix.image}} / 7.4.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} 
- machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.4.2 - - - label: "{{matrix.image}} / 7.5.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.5.2 - - - label: "{{matrix.image}} / 7.6.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.6.2 - - - label: "{{matrix.image}} / 7.7.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.7.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.7.1 - - - label: "{{matrix.image}} / 7.8.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.8.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.8.1 - - - label: "{{matrix.image}} / 7.9.3 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.3 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.9.3 - - - label: "{{matrix.image}} / 7.10.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.10.2 - - - label: "{{matrix.image}} / 7.11.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.11.2 - - - label: "{{matrix.image}} / 7.12.1 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.12.1 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - 
env: - BWC_VERSION: 7.12.1 - - - label: "{{matrix.image}} / 7.13.4 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.4 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.13.4 - - - label: "{{matrix.image}} / 7.14.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.14.2 - - - label: "{{matrix.image}} / 7.15.2 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.2 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.15.2 - - - label: "{{matrix.image}} / 7.16.3 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.3 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.16.3 - - - label: "{{matrix.image}} / 7.17.24 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.24 - timeout_in_minutes: 300 - matrix: - setup: - image: - - rocky-8 - - ubuntu-2004 - agents: - provider: gcp - image: family/elasticsearch-{{matrix.image}} - machineType: custom-16-32768 - buildDirectory: /dev/shm/bk - diskSizeGb: 250 - env: - BWC_VERSION: 7.17.24 - - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1 timeout_in_minutes: 300 @@ -628,6 +322,23 @@ steps: env: BWC_VERSION: 8.16.0 + - label: "{{matrix.image}} / 9.0.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v9.0.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + diskSizeGb: 250 + env: + BWC_VERSION: 9.0.0 + - group: packaging-tests-windows steps: - label: "{{matrix.image}} / packaging-tests-windows" diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 8f25a0fb1106..5f75b7f1a2ef 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -2,366 +2,6 @@ steps: - group: bwc steps: - - label: 7.0.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.0.1 - 
retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.1.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.1.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.2.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.2.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.2.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.3.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.3.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.4.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.4.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.5.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.5.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.5.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.6.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.6.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.7.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.7.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.7.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.8.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.8.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.8.1 - retry: - automatic: - - 
exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.9.3 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.3#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.9.3 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.10.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.10.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.11.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.11.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.12.1 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.12.1#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.12.1 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.13.4 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.4#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.13.4 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.14.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.14.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.15.2 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.2#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.15.2 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.16.3 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.3#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.16.3 - retry: - automatic: - - 
exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - - label: 7.17.24 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.24#bwcTest - timeout_in_minutes: 300 - agents: - provider: gcp - image: family/elasticsearch-ubuntu-2004 - machineType: n1-standard-32 - buildDirectory: /dev/shm/bk - preemptible: true - diskSizeGb: 250 - env: - BWC_VERSION: 7.17.24 - retry: - automatic: - - exit_status: "-1" - limit: 3 - signal_reason: none - - signal_reason: agent_stop - limit: 3 - - label: 8.0.1 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.1#bwcTest timeout_in_minutes: 300 @@ -702,6 +342,26 @@ steps: - signal_reason: agent_stop limit: 3 + - label: 9.0.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v9.0.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + preemptible: true + diskSizeGb: 250 + env: + BWC_VERSION: 9.0.0 + retry: + automatic: + - exit_status: "-1" + limit: 3 + signal_reason: none + - signal_reason: agent_stop + limit: 3 + - label: concurrent-search-tests command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true functionalTests timeout_in_minutes: 420 @@ -771,7 +431,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk17 - BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] + BWC_VERSION: ["8.15.2", "8.16.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -821,7 +481,7 @@ steps: - openjdk21 - openjdk22 - openjdk23 - BWC_VERSION: ["7.17.24", "8.15.2", "8.16.0"] + BWC_VERSION: ["8.15.2", "8.16.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index b80309cdb3f8..498727b3ecd3 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -1,22 +1,4 @@ BWC_VERSION: - - "7.0.1" - - "7.1.1" - - "7.2.1" - - "7.3.2" - - "7.4.2" - - "7.5.2" - - "7.6.2" - - "7.7.1" - - "7.8.1" - - "7.9.3" - - "7.10.2" - - "7.11.2" - - "7.12.1" - - "7.13.4" - - "7.14.2" - - "7.15.2" - - "7.16.3" - - "7.17.24" - "8.0.1" - "8.1.3" - "8.2.3" @@ -34,3 +16,4 @@ BWC_VERSION: - "8.14.3" - "8.15.2" - "8.16.0" + - "9.0.0" diff --git a/.ci/java-versions-aarch64.properties b/.ci/java-versions-aarch64.properties index b1e0f4cfe8af..8815d5011a8e 100644 --- a/.ci/java-versions-aarch64.properties +++ b/.ci/java-versions-aarch64.properties @@ -4,4 +4,4 @@ # build and test Elasticsearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. -ES_BUILD_JAVA=jdk17 +ES_BUILD_JAVA=jdk21 diff --git a/.ci/java-versions-fips.properties b/.ci/java-versions-fips.properties index fa6873935576..87490374bb99 100644 --- a/.ci/java-versions-fips.properties +++ b/.ci/java-versions-fips.properties @@ -4,4 +4,4 @@ # build and test Elasticsearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. -ES_BUILD_JAVA=openjdk17 +ES_BUILD_JAVA=openjdk21 diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index 21884973742b..5ed97ab248c2 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -4,4 +4,4 @@ # build and test Elasticsearch for this branch. Valid Java versions # are 'java' or 'openjdk' followed by the major release number. 
-ES_BUILD_JAVA=openjdk17 +ES_BUILD_JAVA=openjdk21 diff --git a/.ci/scripts/resolve-dra-manifest.sh b/.ci/scripts/resolve-dra-manifest.sh index bd7a9bbbdafe..e42a22834ccc 100755 --- a/.ci/scripts/resolve-dra-manifest.sh +++ b/.ci/scripts/resolve-dra-manifest.sh @@ -23,6 +23,12 @@ LATEST_VERSION=$(strip_version $LATEST_BUILD) if [ "$LATEST_VERSION" != "$ES_VERSION" ]; then echo "Latest build for '$ARTIFACT' is version $LATEST_VERSION but expected version $ES_VERSION." 1>&2 NEW_BRANCH=$(echo $ES_VERSION | sed -E "s/([0-9]+\.[0-9]+)\.[0-9]/\1/g") + + # Temporary + if [[ "$ES_VERSION" == "8.16.0" ]]; then + NEW_BRANCH="8.x" + fi + echo "Using branch $NEW_BRANCH instead of $BRANCH." 1>&2 LATEST_BUILD=$(fetch_build $WORKFLOW $ARTIFACT $NEW_BRANCH) fi diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index e41bbac68f1e..a2f1e0c675ea 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,4 +1,4 @@ BWC_VERSION: - - "7.17.24" - "8.15.2" - "8.16.0" + - "9.0.0" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5b98444c044d..f0d906882002 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -70,3 +70,7 @@ server/src/main/java/org/elasticsearch/threadpool @elastic/es-core-infra # Security x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege @elastic/es-security x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @elastic/es-security + +# Analytical engine +x-pack/plugin/esql @elastic/es-analytical-engine +x-pack/plugin/esql-core @elastic/es-analytical-engine diff --git a/.github/updatecli/values.d/ironbank.yml b/.github/updatecli/values.d/ironbank.yml new file mode 100644 index 000000000000..10b2c948a583 --- /dev/null +++ b/.github/updatecli/values.d/ironbank.yml @@ -0,0 +1,3 @@ +config: + - path: distribution/docker/src/docker/iron_bank + dockerfile: ../Dockerfile diff --git a/.github/updatecli/values.d/scm.yml b/.github/updatecli/values.d/scm.yml new file mode 100644 index 000000000000..ec4bd2d61209 --- /dev/null +++ b/.github/updatecli/values.d/scm.yml @@ -0,0 +1,10 @@ +scm: + enabled: true + owner: elastic + repository: elasticsearch + branch: main + commitusingapi: true + # begin updatecli-compose policy values + user: elasticmachine + email: 42973632+elasticmachine@users.noreply.github.com + # end updatecli-compose policy values diff --git a/.github/updatecli/values.d/updatecli-compose.yml b/.github/updatecli/values.d/updatecli-compose.yml new file mode 100644 index 000000000000..02df609f2a30 --- /dev/null +++ b/.github/updatecli/values.d/updatecli-compose.yml @@ -0,0 +1,3 @@ +spec: + files: + - "updatecli-compose.yaml" \ No newline at end of file diff --git a/.github/workflows/updatecli-compose.yml b/.github/workflows/updatecli-compose.yml new file mode 100644 index 000000000000..cbab42d3a63b --- /dev/null +++ b/.github/workflows/updatecli-compose.yml @@ -0,0 +1,38 @@ +--- +name: updatecli-compose + +on: + workflow_dispatch: + schedule: + - cron: '0 6 * * *' + +permissions: + contents: read + +jobs: + compose: + runs-on: ubuntu-latest + permissions: + contents: write + packages: read + pull-requests: write + steps: + - uses: actions/checkout@v4 + + - uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - uses: elastic/oblt-actions/updatecli/run@v1 + with: + command: --experimental compose diff + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN 
}} + + - uses: elastic/oblt-actions/updatecli/run@v1 + with: + command: --experimental compose apply + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/REST_API_COMPATIBILITY.md b/REST_API_COMPATIBILITY.md index c36b4ea9dbfb..4a6ad4e7e17f 100644 --- a/REST_API_COMPATIBILITY.md +++ b/REST_API_COMPATIBILITY.md @@ -158,12 +158,12 @@ The above code checks the request's compatible version and if the request has th The primary means of testing compatibility is via the prior major version's YAML REST tests. The build system will download the latest prior version of the YAML rest tests and execute them against the current cluster version. Prior to execution the tests will be transformed by injecting the correct headers to enable compatibility as well as other custom changes to the tests to allow the tests to pass. These customizations are configured via the build.gradle and happen just prior to test execution. Since the compatibility tests are manipulated version of the tests stored in Github (via the past major version), it is important to find the local (on disk) version for troubleshooting compatibility tests. -The tests are wired into the `check` task, so that is the easiest way to test locally prior to committing. More specifically the task is called `yamlRestTestV7CompatTest`, where 7 is the version of tests that are executing. For example, version 8 of the server will have a task named `yamlRestTestV7CompatTest` and version 9 of the server will have a task named `yamlRestTestV8CompatTest`. These behaves nearly identical to it's non-compat `yamlRestTest` task. The only variance is that the tests are sourced from the prior version branch and the tests go through a transformation phase before execution. The transformation task is `yamlRestTestV7CompatTransform` where the Vnumber follows the same convention as the test. +The tests are wired into the `check` task, so that is the easiest way to test locally prior to committing. More specifically, the task is called `yamlRestCompatTest`. It behaves nearly identically to its non-compat `yamlRestTest` task. The only variance is that the tests are sourced from the prior version branch and the tests go through a transformation phase before execution. The transformation task is `yamlRestCompatTestTransform`. For example: ```bash -./gradlew :rest-api-spec:yamlRestTestV7CompatTest +./gradlew :rest-api-spec:yamlRestCompatTest ``` Since these are a variation of backward compatibility testing, the entire suite of compatibility tests will be skipped anytime the backward compatibility testing is disabled. Since the source code for these tests live in a branch of code, disabling a specific test should be done via the transformation task configuration in build.gradle (i.e. `yamlRestTestV7CompatTransform`). @@ -188,7 +188,7 @@ Muting compatibility tests should be done via a test transform. A per test skip ```groovy -tasks.named("yamlRestTestV7CompatTransform").configure({ task -> +tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTestsByFilePattern("**/cat*/*.yml", "Cat API are not supported") task.skipTest("bulk/10_basic/Array of objects", "Muted due failures. 
See #12345") }) diff --git a/branches.json b/branches.json index 1d860501cbc8..e464d6179f2b 100644 --- a/branches.json +++ b/branches.json @@ -4,6 +4,9 @@ { "branch": "main" }, + { + "branch": "8.x" + }, { "branch": "8.15" }, diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy index fc7ccd651d73..8c5c84a27671 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy @@ -27,7 +27,7 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF buildFile << """ apply plugin: 'elasticsearch.internal-distribution-bwc-setup' """ - execute("git branch origin/8.0", file("cloned")) + execute("git branch origin/8.x", file("cloned")) execute("git branch origin/7.16", file("cloned")) execute("git branch origin/7.15", file("cloned")) } @@ -113,9 +113,9 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF result.task(":distribution:bwc:minor:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS and: "assemble task triggered" result.output.contains("[8.0.0] > Task :distribution:archives:darwin-tar:extractedAssemble") - result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.0/" + + result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.x/" + "distribution/archives/darwin-tar/build/install") - result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.0/" + + result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.x/" + "distribution/archives/darwin-tar/build/install/elasticsearch-8.0.0-SNAPSHOT") } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy index 737c448f23be..3ffbd926ec84 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy @@ -19,10 +19,9 @@ import org.gradle.testkit.runner.TaskOutcome class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { - def compatibleVersion = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1 - def specIntermediateDir = "restResources/v${compatibleVersion}/yamlSpecs" - def testIntermediateDir = "restResources/v${compatibleVersion}/yamlTests" - def transformTask = ":yamlRestTestV${compatibleVersion}CompatTransform" + def specIntermediateDir = "restResources/compat/yamlSpecs" + def testIntermediateDir = "restResources/compat/yamlTests" + def transformTask = ":yamlRestCompatTestTransform" def YAML_FACTORY = new YAMLFactory() def MAPPER = new ObjectMapper(YAML_FACTORY) def READER = MAPPER.readerFor(ObjectNode.class) @@ -36,9 +35,11 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe buildApiRestrictionsDisabled = true } - def "yamlRestTestVxCompatTest 
does nothing when there are no tests"() { + def "yamlRestCompatTest does nothing when there are no tests"() { given: - subProject(":distribution:bwc:maintenance") << """ + internalBuild() + + subProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -46,26 +47,24 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe """ buildFile << """ - plugins { - id 'elasticsearch.legacy-yaml-rest-compat-test' - } + apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' """ when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest", '--stacktrace').build() + def result = gradleRunner("yamlRestCompatTest", '--stacktrace').build() then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE + result.task(":yamlRestCompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE } - def "yamlRestTestVxCompatTest executes and copies api and transforms tests from :bwc:maintenance"() { + def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:staged"() { given: internalBuild() - subProject(":distribution:bwc:maintenance") << """ + subProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -90,7 +89,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe String wrongTest = "wrong_version.yml" String additionalTest = "additional_test.yml" setupRestResources([wrongApi], [wrongTest]) //setups up resources for current version, which should not be used for this test - String sourceSetName = "yamlRestTestV" + compatibleVersion + "Compat" + String sourceSetName = "yamlRestCompatTest" addRestTestsToProject([additionalTest], sourceSetName) //intentionally adding to yamlRestTest source set since the .classes are copied from there file("src/yamlRestTest/java/MockIT.java") << "import org.junit.Test;class MockIT { @Test public void doNothing() { }}" @@ -98,14 +97,14 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe String api = "foo.json" String test = "10_basic.yml" //add the compatible test and api files, these are the prior version's normal yaml rest tests - file("distribution/bwc/maintenance/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" - file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" + file("distribution/bwc/staged/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" + file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + def result = gradleRunner("yamlRestCompatTest").build() then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestCompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SUCCESS result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SUCCESS result.task(transformTask).outcome == TaskOutcome.SUCCESS @@ -132,19 +131,20 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe 
result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE when: - result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + result = gradleRunner("yamlRestCompatTest").build() then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestCompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.UP_TO_DATE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.UP_TO_DATE result.task(transformTask).outcome == TaskOutcome.UP_TO_DATE } - def "yamlRestTestVxCompatTest is wired into check and checkRestCompat"() { + def "yamlRestCompatTest is wired into check and checkRestCompat"() { given: + internalBuild() withVersionCatalogue() - subProject(":distribution:bwc:maintenance") << """ + subProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -152,10 +152,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe """ buildFile << """ - plugins { - id 'elasticsearch.legacy-yaml-rest-compat-test' - } - + apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' """ when: @@ -164,7 +161,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe then: result.task(':check').outcome == TaskOutcome.UP_TO_DATE result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE + result.task(":yamlRestCompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE @@ -178,7 +175,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe then: result.task(':check').outcome == TaskOutcome.UP_TO_DATE result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(":yamlRestCompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SKIPPED result.task(transformTask).outcome == TaskOutcome.SKIPPED @@ -188,7 +185,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe given: internalBuild() - subProject(":distribution:bwc:maintenance") << """ + subProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -204,7 +201,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe dependencies { yamlRestTestImplementation "junit:junit:4.12" } - tasks.named("yamlRestTestV${compatibleVersion}CompatTransform").configure({ task -> + tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("test/test/two", "This is a test to skip test two") task.replaceValueInMatch("_type", "_doc") task.replaceValueInMatch("_source.values", ["z", "x", "y"], "one") @@ -232,7 +229,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe setupRestResources([], []) - file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """ + file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) 
<< """ "one": - do: do_.some.key_to_replace: @@ -279,7 +276,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - match: {} """.stripIndent() when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + def result = gradleRunner("yamlRestCompatTest").build() then: @@ -302,22 +299,22 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe --- one: - do: - do_.some.key_that_was_replaced: - index: "test" - id: 1 - keyvalue : replacedkeyvalue do_.some.key_to_replace_in_two: no_change_here: "because it's not in test 'two'" warnings: - "warning1" - "warning2" headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" allowed_warnings: - "added allowed warning" allowed_warnings_regex: - "added allowed warning regex .* [0-9]" + do_.some.key_that_was_replaced: + index: "test" + id: 1 + keyvalue : "replacedkeyvalue" - match: _source.values: - "z" @@ -334,13 +331,14 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - is_false: "replaced_value" - is_true: "value_not_to_replace" - is_false: "value_not_to_replace" - - length: { key.in_length_that_was_replaced: 1 } - - length: { value_to_replace: 99 } + - length: + key.in_length_that_was_replaced: 1 + - length: + value_to_replace: 99 - match: _source.added: name: "jake" likes: "cheese" - --- two: - skip: @@ -349,17 +347,17 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe get: index: "test2" id: 1 - do_.some.key_that_was_replaced_in_two: - changed_here: "because it is in test 'two'" headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" + Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" + Accept: "application/vnd.elasticsearch+json;compatible-with=8" warnings_regex: - "regex warning here .* [a-z]" allowed_warnings: - "added allowed warning" allowed_warnings_regex: - "added allowed warning regex .* [0-9]" + do_.some.key_that_was_replaced_in_two: + changed_here: "because it is in test 'two'" - match: _source.values: - "foo" @@ -371,12 +369,12 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - is_false: "replaced_value" - is_true: "value_not_to_replace" - is_false: "value_not_to_replace" - - length: { value_not_to_replace: 1 } + - length: + value_not_to_replace: 1 --- "use cat with no header": - do: - cat.indices: - {} + cat.indices: {} allowed_warnings: - "added allowed warning" allowed_warnings_regex: @@ -384,7 +382,7 @@ class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTe - match: {} """.stripIndent()).readAll() - expectedAll.eachWithIndex{ ObjectNode expected, int i -> + expectedAll.eachWithIndex { ObjectNode expected, int i -> if(expected != actual.get(i)) { println("\nTransformed Test:") SequenceWriter sequenceWriter = WRITER.writeValues(System.out) diff --git a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion index 98d9bcb75a68..aabe6ec3909c 
100644 --- a/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion +++ b/build-tools-internal/src/integTest/resources/org/elasticsearch/gradle/internal/fake_git/remote/build-tools-internal/src/main/resources/minimumCompilerVersion @@ -1 +1 @@ -17 +21 diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java index 41bfddb01e66..7b1bc14fda8a 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.gradle.internal; -import org.elasticsearch.gradle.Architecture; -import org.elasticsearch.gradle.ElasticsearchDistribution; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; @@ -27,7 +25,6 @@ import java.util.function.Predicate; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; import static java.util.Collections.unmodifiableList; @@ -67,7 +64,6 @@ public class BwcVersions { private static final Pattern LINE_PATTERN = Pattern.compile( "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)?.*\\);" ); - private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); private static final String GLIBC_VERSION_ENV_VAR = "GLIBC_VERSION"; private final Version currentVersion; @@ -124,9 +120,7 @@ public UnreleasedVersionInfo unreleasedInfo(Version version) { } public void forPreviousUnreleased(Consumer consumer) { - filterSupportedVersions( - getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).collect(Collectors.toList()) - ).stream().map(unreleased::get).forEach(consumer); + getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).map(unreleased::get).forEach(consumer); } private String getBranchFor(Version version) { @@ -155,6 +149,7 @@ private Map computeUnreleased() { List unreleasedList = unreleased.stream().sorted(Comparator.reverseOrder()).toList(); Map result = new TreeMap<>(); + boolean newMinor = false; for (int i = 0; i < unreleasedList.size(); i++) { Version esVersion = unreleasedList.get(i); // This is either a new minor or staged release @@ -162,11 +157,17 @@ private Map computeUnreleased() { result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution")); } else if (esVersion.getRevision() == 0) { // If there are two upcoming unreleased minors then this one is the new minor - if (unreleasedList.get(i + 1).getRevision() == 0) { - result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:minor")); - } else { - result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged")); - } + if (newMinor == false && unreleasedList.get(i + 1).getRevision() == 0) { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor")); + newMinor = true; + } else if (newMinor == false + && unreleasedList.stream().filter(v -> v.getMajor() == esVersion.getMajor() && v.getRevision() == 0).count() == 1) { + // This is the only unreleased new minor which means we've not yet staged it for release + result.put(esVersion, new 
UnreleasedVersionInfo(esVersion, esVersion.getMajor() + ".x", ":distribution:bwc:minor")); + newMinor = true; + } else { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged")); + } } else { // If this is the oldest unreleased version and we have a maintenance release if (i == unreleasedList.size() - 1 && hasMaintenanceRelease) { @@ -222,20 +223,16 @@ public void compareToAuthoritative(List authoritativeReleasedVersions) } private List getReleased() { - return versions.stream().filter(v -> unreleased.containsKey(v) == false).toList(); + return versions.stream() + .filter(v -> v.getMajor() >= currentVersion.getMajor() - 1) + .filter(v -> unreleased.containsKey(v) == false) + .toList(); } /** - * Return versions of Elasticsearch which are index compatible with the current version, and also work on the local machine. + * Return versions of Elasticsearch which are index compatible with the current version. */ public List getIndexCompatible() { - return filterSupportedVersions(getAllIndexCompatible()); - } - - /** - * Return all versions of Elasticsearch which are index compatible with the current version. - */ - public List getAllIndexCompatible() { return versions.stream().filter(v -> v.getMajor() >= (currentVersion.getMajor() - 1)).toList(); } @@ -248,7 +245,7 @@ public void withIndexCompatible(Predicate filter, BiConsumer getWireCompatible() { - return filterSupportedVersions(versions.stream().filter(v -> v.compareTo(MINIMUM_WIRE_COMPATIBLE_VERSION) >= 0).toList()); + return versions.stream().filter(v -> v.compareTo(getMinimumWireCompatibleVersion()) >= 0).toList(); } public void withWireCompatible(BiConsumer versionAction) { @@ -259,20 +256,6 @@ public void withWireCompatible(Predicate filter, BiConsumer versionAction.accept(v, "v" + v.toString())); } - private List filterSupportedVersions(List wireCompat) { - Predicate supported = v -> true; - if (Architecture.current() == Architecture.AARCH64) { - final String version; - if (ElasticsearchDistribution.CURRENT_PLATFORM.equals(ElasticsearchDistribution.Platform.DARWIN)) { - version = "7.16.0"; - } else { - version = "7.12.0"; // linux shipped earlier for aarch64 - } - supported = v -> v.onOrAfter(version); - } - return wireCompat.stream().filter(supported).collect(Collectors.toList()); - } - public List getUnreleasedIndexCompatible() { List unreleasedIndexCompatible = new ArrayList<>(getIndexCompatible()); unreleasedIndexCompatible.retainAll(getUnreleased()); @@ -286,7 +269,17 @@ public List getUnreleasedWireCompatible() { } public Version getMinimumWireCompatibleVersion() { - return MINIMUM_WIRE_COMPATIBLE_VERSION; + // Determine minimum wire compatible version from list of known versions. + // Current BWC policy states the minimum wire compatible version is the last minor release of the previous major version. 
+ return versions.stream() + .filter(v -> v.getRevision() == 0) + .filter(v -> v.getMajor() == currentVersion.getMajor() - 1) + .max(Comparator.naturalOrder()) + .orElseThrow(() -> new IllegalStateException("Unable to determine minimum wire compatible version.")); + } + + public Version getCurrentVersion() { + return currentVersion; } public record UnreleasedVersionInfo(Version version, String branch, String gradleProjectPath) {} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index 97172ec51e5b..3af59f00299b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -12,27 +12,40 @@ * This class models the different Docker base images that are used to build Docker distributions of Elasticsearch. */ public enum DockerBase { - DEFAULT("ubuntu:20.04", ""), + DEFAULT("ubuntu:20.04", "", "apt-get"), // "latest" here is intentional, since the image name specifies "8" - UBI("docker.elastic.co/ubi8/ubi-minimal:latest", "-ubi8"), + UBI("docker.elastic.co/ubi8/ubi-minimal:latest", "-ubi8", "microdnf"), // The Iron Bank base image is UBI (albeit hardened), but we are required to parameterize the Docker build - IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank"), + IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank", "yum"), // Base image with extras for Cloud - CLOUD("ubuntu:20.04", "-cloud"), + CLOUD("ubuntu:20.04", "-cloud", "apt-get"), // Based on CLOUD above, with more extras. We don't set a base image because // we programmatically extend from the Cloud image. 
- CLOUD_ESS(null, "-cloud-ess"); + CLOUD_ESS(null, "-cloud-ess", "apt-get"), + + // Chainguard based wolfi image with latest jdk + WOLFI( + "docker.elastic.co/wolfi/chainguard-base:latest@sha256:c16d3ad6cebf387e8dd2ad769f54320c4819fbbaa21e729fad087c7ae223b4d0", + "-wolfi", + "apk" + ); private final String image; private final String suffix; + private final String packageManager; DockerBase(String image, String suffix) { + this(image, suffix, "apt-get"); + } + + DockerBase(String image, String suffix, String packageManager) { this.image = image; this.suffix = suffix; + this.packageManager = packageManager; } public String getImage() { @@ -42,4 +55,8 @@ public String getImage() { public String getSuffix() { return suffix; } + + public String getPackageManager() { + return packageManager; + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index f92789f70104..eeb4306ce6fb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -177,6 +177,9 @@ private static String distributionProjectName(ElasticsearchDistribution distribu if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS) { return projectName + "cloud-ess-docker" + archString + "-export"; } + if (distribution.getType() == InternalElasticsearchDistributionTypes.DOCKER_WOLFI) { + return projectName + "wolfi-docker" + archString + "-export"; + } return projectName + distribution.getType().getName(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerWolfiElasticsearchDistributionType.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerWolfiElasticsearchDistributionType.java new file mode 100644 index 000000000000..d055337436a8 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/DockerWolfiElasticsearchDistributionType.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.distribution; + +import org.elasticsearch.gradle.ElasticsearchDistributionType; + +public class DockerWolfiElasticsearchDistributionType implements ElasticsearchDistributionType { + + DockerWolfiElasticsearchDistributionType() {} + + @Override + public String getName() { + return "dockerWolfi"; + } + + @Override + public boolean isDocker() { + return true; + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java index 0b6ef212a63d..5f8ef58e44a6 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/distribution/InternalElasticsearchDistributionTypes.java @@ -20,6 +20,7 @@ public class InternalElasticsearchDistributionTypes { public static ElasticsearchDistributionType DOCKER_IRONBANK = new DockerIronBankElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_CLOUD = new DockerCloudElasticsearchDistributionType(); public static ElasticsearchDistributionType DOCKER_CLOUD_ESS = new DockerCloudEssElasticsearchDistributionType(); + public static ElasticsearchDistributionType DOCKER_WOLFI = new DockerWolfiElasticsearchDistributionType(); public static List ALL_INTERNAL = List.of( DEB, @@ -28,6 +29,7 @@ public class InternalElasticsearchDistributionTypes { DOCKER_UBI, DOCKER_IRONBANK, DOCKER_CLOUD, - DOCKER_CLOUD_ESS + DOCKER_CLOUD_ESS, + DOCKER_WOLFI ); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index 42d3a770dbbc..5b1044bbb29a 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -52,6 +52,7 @@ import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_CLOUD_ESS; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_IRONBANK; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_UBI; +import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER_WOLFI; import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.RPM; /** @@ -93,6 +94,7 @@ public void apply(Project project) { for (ElasticsearchDistribution distribution : testDistributions) { String taskname = destructiveDistroTestTaskName(distribution); + ElasticsearchDistributionType type = distribution.getType(); TaskProvider destructiveTask = configureTestTask(project, taskname, distribution, t -> { t.onlyIf( "Docker is not available", @@ -106,12 +108,13 @@ public void apply(Project project) { if (distribution.getPlatform() == Platform.WINDOWS) { windowsTestTasks.add(destructiveTask); } else { - linuxTestTasks.computeIfAbsent(distribution.getType(), k -> new ArrayList<>()).add(destructiveTask); + linuxTestTasks.computeIfAbsent(type, k -> new ArrayList<>()).add(destructiveTask); } destructiveDistroTest.configure(t -> 
t.dependsOn(destructiveTask)); - lifecycleTasks.get(distribution.getType()).configure(t -> t.dependsOn(destructiveTask)); + TaskProvider lifecycleTask = lifecycleTasks.get(type); + lifecycleTask.configure(t -> t.dependsOn(destructiveTask)); - if ((distribution.getType() == DEB || distribution.getType() == RPM) && distribution.getBundledJdk()) { + if ((type == DEB || type == RPM) && distribution.getBundledJdk()) { for (Version version : BuildParams.getBwcVersions().getIndexCompatible()) { final ElasticsearchDistribution bwcDistro; if (version.equals(Version.fromString(distribution.getVersion()))) { @@ -121,7 +124,7 @@ public void apply(Project project) { bwcDistro = createDistro( allDistributions, distribution.getArchitecture(), - distribution.getType(), + type, distribution.getPlatform(), distribution.getBundledJdk(), version.toString() @@ -147,6 +150,7 @@ private static Map> lifecycleTask lifecyleTasks.put(DOCKER_IRONBANK, project.getTasks().register(taskPrefix + ".docker-ironbank")); lifecyleTasks.put(DOCKER_CLOUD, project.getTasks().register(taskPrefix + ".docker-cloud")); lifecyleTasks.put(DOCKER_CLOUD_ESS, project.getTasks().register(taskPrefix + ".docker-cloud-ess")); + lifecyleTasks.put(DOCKER_WOLFI, project.getTasks().register(taskPrefix + ".docker-wolfi")); lifecyleTasks.put(ARCHIVE, project.getTasks().register(taskPrefix + ".archives")); lifecyleTasks.put(DEB, project.getTasks().register(taskPrefix + ".packages")); lifecyleTasks.put(RPM, lifecyleTasks.get(DEB)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index 77af3445f530..a170606800f3 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -78,6 +78,7 @@ public class RestTestBasePlugin implements Plugin { private static final String FEATURES_METADATA_CONFIGURATION = "featuresMetadataDeps"; private static final String DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION = "defaultDistrofeaturesMetadataDeps"; private static final String TESTS_FEATURES_METADATA_PATH = "tests.features.metadata.path"; + private static final String MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP = "tests.minimum.wire.compatible"; private final ProviderFactory providerFactory; @@ -173,6 +174,9 @@ public void apply(Project project) { task.systemProperty("tests.security.manager", "false"); task.systemProperty("tests.system_call_filter", "false"); + // Pass minimum wire compatible version which is used by upgrade tests + task.systemProperty(MINIMUM_WIRE_COMPATIBLE_VERSION_SYSPROP, BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + // Register plugins and modules as task inputs and pass paths as system properties to tests var modulePath = project.getObjects().fileCollection().from(modulesConfiguration); nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulePath::getAsPath); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java index e0581ebf6708..fd1446b5ff21 100644 --- 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java @@ -9,8 +9,8 @@ package org.elasticsearch.gradle.internal.test.rest.compat.compat; import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; +import org.elasticsearch.gradle.internal.info.BuildParams; import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask; import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask; import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; @@ -40,6 +40,7 @@ import java.io.File; import java.nio.file.Path; import java.util.Arrays; +import java.util.Comparator; import java.util.Map; import javax.inject.Inject; @@ -60,8 +61,7 @@ public abstract class AbstractYamlRestCompatTestPlugin implements Plugin v.getMajor() == currentMajor - 1) + .min(Comparator.reverseOrder()) + .get(); + String lastMinorProjectPath = BuildParams.getBwcVersions().unreleasedInfo(lastMinor).gradleProjectPath(); + // copy compatible rest specs Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); - Dependency bwcMinor = project.getDependencies() - .project(Map.of("path", ":distribution:bwc:maintenance", "configuration", "checkout")); + Dependency bwcMinor = project.getDependencies().project(Map.of("path", lastMinorProjectPath, "configuration", "checkout")); project.getDependencies().add(bwcMinorConfig.getName(), bwcMinor); String projectPath = project.getPath(); @@ -183,7 +192,7 @@ public void apply(Project project) { // transform the copied tests task TaskProvider transformCompatTestTask = project.getTasks() - .register("yamlRestTestV" + COMPATIBLE_VERSION + "CompatTransform", RestCompatTestTransformTask.class, task -> { + .register("yamlRestCompatTestTransform", RestCompatTestTransformTask.class, task -> { task.getSourceDirectory().set(copyCompatYamlTestTask.flatMap(CopyRestTestsTask::getOutputResourceDir)); task.getOutputDirectory() .set(project.getLayout().getBuildDirectory().dir(compatTestsDir.resolve("transformed").toString())); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java index e84c84cc426a..0bff8d65586d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java @@ -34,7 +34,7 @@ public LegacyYamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperation @Override public TaskProvider registerTestTask(Project project, SourceSet sourceSet) { - return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getTaskName(null, "test")); + return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getName()); } @Override diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java index 
79588ca722ff..b376284761ff 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java @@ -32,7 +32,7 @@ public YamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperations file @Override public TaskProvider registerTestTask(Project project, SourceSet sourceSet) { - return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getTaskName(null, "test"), StandaloneRestIntegTestTask.class); + return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getName(), StandaloneRestIntegTestTask.class); } @Override diff --git a/build-tools-internal/src/main/resources/minimumCompilerVersion b/build-tools-internal/src/main/resources/minimumCompilerVersion index 98d9bcb75a68..aabe6ec3909c 100644 --- a/build-tools-internal/src/main/resources/minimumCompilerVersion +++ b/build-tools-internal/src/main/resources/minimumCompilerVersion @@ -1 +1 @@ -17 +21 diff --git a/build-tools-internal/src/main/resources/minimumRuntimeVersion b/build-tools-internal/src/main/resources/minimumRuntimeVersion index 98d9bcb75a68..aabe6ec3909c 100644 --- a/build-tools-internal/src/main/resources/minimumRuntimeVersion +++ b/build-tools-internal/src/main/resources/minimumRuntimeVersion @@ -1 +1 @@ -17 +21 diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy index 39a9af38e6a9..8fa1ac9ea209 100644 --- a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy @@ -10,12 +10,9 @@ package org.elasticsearch.gradle.internal import spock.lang.Specification -import org.elasticsearch.gradle.Architecture -import org.elasticsearch.gradle.ElasticsearchDistribution import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo - class BwcVersionsSpec extends Specification { List versionLines = [] @@ -42,11 +39,12 @@ class BwcVersionsSpec extends Specification { unreleased == [ (v('7.16.2')): new UnreleasedVersionInfo(v('7.16.2'), '7.16', ':distribution:bwc:bugfix'), (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.17', ':distribution:bwc:staged'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.0', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.x', ':distribution:bwc:minor'), (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == [v('7.17.0'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.16.2'), v('7.17.0'), v('8.0.0'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.16.2'), v('7.17.0'), v('8.0.0'), v('8.1.0')] + bwc.minimumWireCompatibleVersion == v('7.17.0') } def "current version is next minor with next major and last minor both staged"() { @@ -71,11 +69,11 @@ class BwcVersionsSpec extends Specification { unreleased == [ (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), 
'7.17', ':distribution:bwc:staged'), - (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.0', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.x', ':distribution:bwc:minor'), (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == [v('7.17.0'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0'), v('8.1.0')] } def "current is next minor with upcoming minor staged"() { @@ -104,7 +102,7 @@ class BwcVersionsSpec extends Specification { (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.1.0')] } def "current version is staged major"() { @@ -131,7 +129,61 @@ class BwcVersionsSpec extends Specification { (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), ] bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0')] + } + + def "current version is major with unreleased next minor"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.16.1', '8.10.0') + addVersion('7.17.0', '8.10.0') + addVersion('8.0.0', '9.0.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.0.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), + (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.x', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), + ] + bwc.wireCompatible == [v('7.17.0'), v('8.0.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('8.0.0')] + } + + def "current version is major with staged next minor"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.17.0', '8.10.0') + addVersion('8.0.0', '9.0.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.0.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.15.2')): 
new UnreleasedVersionInfo(v('7.15.2'), '7.15', ':distribution:bwc:bugfix'), + (v('7.16.0')): new UnreleasedVersionInfo(v('7.16.0'), '7.16', ':distribution:bwc:staged'), + (v('7.17.0')): new UnreleasedVersionInfo(v('7.17.0'), '7.x', ':distribution:bwc:minor'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'main', ':distribution'), + ] + bwc.wireCompatible == [v('7.17.0'), v('8.0.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.17.0'), v('8.0.0')] } def "current version is next bugfix"() { @@ -159,7 +211,7 @@ class BwcVersionsSpec extends Specification { (v('8.0.1')): new UnreleasedVersionInfo(v('8.0.1'), 'main', ':distribution'), ] bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1')] } def "current version is next minor with no staged releases"() { @@ -189,7 +241,7 @@ class BwcVersionsSpec extends Specification { (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'main', ':distribution') ] bwc.wireCompatible == [v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] - bwc.indexCompatible == osFiltered([v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')]) + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('7.17.0'), v('7.17.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] } private void addVersion(String elasticsearch, String lucene) { @@ -202,12 +254,4 @@ class BwcVersionsSpec extends Specification { return Version.fromString(version) } - private boolean osxAarch64() { - Architecture.current() == Architecture.AARCH64 && - ElasticsearchDistribution.CURRENT_PLATFORM.equals(ElasticsearchDistribution.Platform.DARWIN) - } - - private List osFiltered(ArrayList versions) { - return osxAarch64() ? 
versions.findAll {it.onOrAfter("7.16.0")} : versions - } } diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 1dd9fb95bd17..edb97a2968bc 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,4 +1,4 @@ -elasticsearch = 8.16.0 +elasticsearch = 9.0.0 lucene = 9.11.1 bundled_jdk_vendor = openjdk diff --git a/build.gradle b/build.gradle index 01fdace570ce..8430ac335d44 100644 --- a/build.gradle +++ b/build.gradle @@ -135,7 +135,7 @@ tasks.register("updateCIBwcVersions") { } doLast { - writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible)) + writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible)) writeVersions(file(".ci/snapshotBwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible)) expandBwcList( ".buildkite/pipelines/intake.yml", @@ -149,7 +149,7 @@ tasks.register("updateCIBwcVersions") { new ListExpansion(versions: filterIntermediatePatches(BuildParams.bwcVersions.unreleasedIndexCompatible), variable: "BWC_LIST"), ], [ - new StepExpansion(templatePath: ".buildkite/pipelines/periodic.bwc.template.yml", versions: filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible), variable: "BWC_STEPS"), + new StepExpansion(templatePath: ".buildkite/pipelines/periodic.bwc.template.yml", versions: filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible), variable: "BWC_STEPS"), ] ) @@ -157,7 +157,7 @@ tasks.register("updateCIBwcVersions") { ".buildkite/pipelines/periodic-packaging.yml", ".buildkite/pipelines/periodic-packaging.template.yml", ".buildkite/pipelines/periodic-packaging.bwc.template.yml", - filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible) + filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible) ) } } @@ -186,7 +186,7 @@ tasks.register("verifyVersions") { .collect { Version.fromString(it) } ) } - verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.allIndexCompatible)) + verifyCiYaml(file(".ci/bwcVersions"), filterIntermediatePatches(BuildParams.bwcVersions.indexCompatible)) verifyCiYaml(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible) // Make sure backport bot config file is up to date diff --git a/distribution/docker/README.md b/distribution/docker/README.md index 4c8052cfc26b..eb0e7b296097 100644 --- a/distribution/docker/README.md +++ b/distribution/docker/README.md @@ -6,11 +6,13 @@ the [DockerBase] enum. * Default - this is what most people use, and is based on Ubuntu * UBI - the same as the default image, but based upon [RedHat's UBI images][ubi], specifically their minimal flavour. + * Wolfi - the same as the default image, but based upon [Wolfi](https://github.com/wolfi-dev) * Iron Bank - this is the US Department of Defence's repository of digitally signed, binary container images including both Free and Open-Source software (FOSS) and Commercial off-the-shelf (COTS). In practice, this is another UBI build, this time on the regular UBI image, with extra hardening. See below for more details. 
+ * Cloud - this is mostly the same as the default image, with some notable differences: * `filebeat` and `metricbeat` are included * `wget` is included diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index 85e66ccba34b..30974ed2396a 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -21,8 +21,6 @@ apply plugin: 'elasticsearch.dra-artifacts' String buildId = providers.systemProperty('build.id').getOrNull() boolean useLocalArtifacts = buildId != null && buildId.isBlank() == false && useDra == false - - repositories { // Define a repository that allows Gradle to fetch a resource from GitHub. This // is only used to fetch the `tini` binary, when building the Iron Bank docker image @@ -131,7 +129,7 @@ ext.expansions = { Architecture architecture, DockerBase base -> 'config_dir' : base == DockerBase.IRON_BANK ? 'scripts' : 'config', 'git_revision' : BuildParams.gitRevision, 'license' : base == DockerBase.IRON_BANK ? 'Elastic License 2.0' : 'Elastic-License-2.0', - 'package_manager' : base == DockerBase.IRON_BANK ? 'yum' : (base == DockerBase.UBI ? 'microdnf' : 'apt-get'), + 'package_manager' : base.packageManager, 'docker_base' : base.name().toLowerCase(), 'version' : VersionProperties.elasticsearch, 'major_minor_version': "${major}.${minor}", @@ -182,21 +180,12 @@ ext.dockerBuildContext = { Architecture architecture, DockerBase base -> from projectDir.resolve("src/docker/config") } } - from(projectDir.resolve("src/docker/Dockerfile")) { expand(varExpansions) filter SquashNewlinesFilter } } } -// -//def createAndSetWritable(Object... locations) { -// locations.each { location -> -// File file = file(location) -// file.mkdirs() -// file.setWritable(true, false) -// } -//} tasks.register("copyNodeKeyMaterial", Sync) { def certsDir = file("build/certs") @@ -526,6 +515,8 @@ subprojects { Project subProject -> base = DockerBase.CLOUD_ESS } else if (subProject.name.contains('cloud-')) { base = DockerBase.CLOUD + } else if (subProject.name.contains('wolfi-')) { + base = DockerBase.WOLFI } final String arch = architecture == Architecture.AARCH64 ? '-aarch64' : '' @@ -533,7 +524,8 @@ subprojects { Project subProject -> (base == DockerBase.IRON_BANK ? 'ironbank.tar' : (base == DockerBase.CLOUD ? 'cloud.tar' : (base == DockerBase.CLOUD_ESS ? 'cloud-ess.tar' : - 'docker.tar'))) + (base == DockerBase.WOLFI ? 'wolfi.tar' : + 'docker.tar')))) final String artifactName = "elasticsearch${arch}${base.suffix}_test" final String exportTaskName = taskName("export", architecture, base, 'DockerImage') diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index 2a2a77a6df82..47f79749cbef 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -43,29 +43,34 @@ RUN chmod 0555 /bin/tini # Install required packages to extract the Elasticsearch distribution <% if (docker_base == 'default' || docker_base == 'cloud') { %> RUN <%= retry.loop(package_manager, "${package_manager} update && DEBIAN_FRONTEND=noninteractive ${package_manager} install -y curl ") %> +<% } else if (docker_base == "wolfi") { %> +RUN <%= retry.loop(package_manager, "export DEBIAN_FRONTEND=noninteractive && ${package_manager} update && ${package_manager} update && ${package_manager} add --no-cache curl") %> <% } else { %> RUN <%= retry.loop(package_manager, "${package_manager} install -y findutils tar gzip") %> <% } %> -# `tini` is a tiny but valid init for containers. 
This is used to cleanly -# control how ES and any child processes are shut down. -# -# The tini GitHub page gives instructions for verifying the binary using -# gpg, but the keyservers are slow to return the key and this can fail the -# build. Instead, we check the binary against the published checksum. -RUN set -eux ; \\ - tini_bin="" ; \\ - case "\$(arch)" in \\ - aarch64) tini_bin='tini-arm64' ;; \\ - x86_64) tini_bin='tini-amd64' ;; \\ - *) echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ;; \\ - esac ; \\ - curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/\${tini_bin} ; \\ - curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/\${tini_bin}.sha256sum ; \\ - sha256sum -c \${tini_bin}.sha256sum ; \\ - rm \${tini_bin}.sha256sum ; \\ - mv \${tini_bin} /bin/tini ; \\ - chmod 0555 /bin/tini +<% if (docker_base != 'wolfi') { %> + # `tini` is a tiny but valid init for containers. This is used to cleanly + # control how ES and any child processes are shut down. + # For wolfi we pick it from the blessed wolfi package registry. + # + # The tini GitHub page gives instructions for verifying the binary using + # gpg, but the keyservers are slow to return the key and this can fail the + # build. Instead, we check the binary against the published checksum. + RUN set -eux ; \\ + tini_bin="" ; \\ + case "\$(arch)" in \\ + aarch64) tini_bin='tini-arm64' ;; \\ + x86_64) tini_bin='tini-amd64' ;; \\ + *) echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ;; \\ + esac ; \\ + curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/\${tini_bin} ; \\ + curl --retry 10 -S -L -O https://github.com/krallin/tini/releases/download/v0.19.0/\${tini_bin}.sha256sum ; \\ + sha256sum -c \${tini_bin}.sha256sum ; \\ + rm \${tini_bin}.sha256sum ; \\ + mv \${tini_bin} /bin/tini ; \\ + chmod 0555 /bin/tini +<% } %> <% } %> @@ -152,6 +157,15 @@ RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\ nc shadow-utils zip findutils unzip procps-ng && \\ ${package_manager} clean all +<% } else if (docker_base == "wolfi") { %> +RUN <%= retry.loop(package_manager, + "export DEBIAN_FRONTEND=noninteractive && \n" + + " ${package_manager} update && \n" + + " ${package_manager} upgrade && \n" + + " ${package_manager} add --no-cache \n" + + " bash ca-certificates curl libsystemd netcat-openbsd p11-kit p11-kit-trust shadow tini unzip zip zstd && \n" + + " rm -rf /var/cache/apk/* " + ) %> <% } else if (docker_base == "default" || docker_base == "cloud") { %> # Change default shell to bash, then install required packages with retries. 
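A note on the `package_manager` expansion in the build.gradle hunk above: the removed ternary chain (`yum` for Iron Bank, `microdnf` for UBI, `apt-get` otherwise) is replaced by a `base.packageManager` property, so each image flavour carries its own package manager. Below is a minimal Java sketch of what that could look like on the `DockerBase` enum; the constructor shape is an assumption, `apk` for Wolfi is inferred from the `add --no-cache` syntax used in the template above, and the other values come from the removed ternary.

public enum DockerBase {
    DEFAULT("apt-get"),
    UBI("microdnf"),
    IRON_BANK("yum"),
    CLOUD("apt-get"),
    CLOUD_ESS("apt-get"),
    WOLFI("apk");

    private final String packageManager;

    DockerBase(String packageManager) {
        this.packageManager = packageManager;
    }

    // Groovy's `base.packageManager` in build.gradle resolves to this getter.
    public String getPackageManager() {
        return packageManager;
    }
}

Centralising the mapping on the enum means adding a new base image flavour, as this change does for Wolfi, touches a single constant instead of a nested ternary in the build script.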
@@ -185,6 +199,11 @@ RUN groupadd -g 1000 elasticsearch && \\ adduser --uid 1000 --gid 1000 --home /usr/share/elasticsearch elasticsearch && \\ adduser elasticsearch root && \\ chown -R 0:0 /usr/share/elasticsearch +<% } else if (docker_base == "wolfi") { %> +RUN groupadd -g 1000 elasticsearch && \ + adduser -G elasticsearch -u 1000 elasticsearch -D --home /usr/share/elasticsearch elasticsearch && \ + adduser elasticsearch root && \ + chown -R 0:0 /usr/share/elasticsearch <% } else { %> RUN groupadd -g 1000 elasticsearch && \\ adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \\ @@ -196,7 +215,9 @@ ENV ELASTIC_CONTAINER true WORKDIR /usr/share/elasticsearch COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch +<% if (docker_base != "wolfi") { %> COPY --from=builder --chown=0:0 /bin/tini /bin/tini +<% } %> <% if (docker_base == 'cloud') { %> COPY --from=builder --chown=0:0 /opt /opt @@ -280,7 +301,12 @@ CMD ["/app/elasticsearch.sh"] RUN mkdir /app && \\ echo -e '#!/bin/bash\\nexec /usr/local/bin/docker-entrypoint.sh eswrapper' > /app/elasticsearch.sh && \\ chmod 0555 /app/elasticsearch.sh - +<% } else if (docker_base == "wolfi") { %> +# Our actual entrypoint is `tini`, a minimal but functional init program. It +# calls the entrypoint we provide, while correctly forwarding signals. +ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/docker-entrypoint.sh"] +# Dummy overridable parameter parsed by entrypoint +CMD ["eswrapper"] <% } else { %> # Our actual entrypoint is `tini`, a minimal but functional init program. It # calls the entrypoint we provide, while correctly forwarding signals. diff --git a/distribution/docker/wolfi-docker-aarch64-export/build.gradle b/distribution/docker/wolfi-docker-aarch64-export/build.gradle new file mode 100644 index 000000000000..537b5a093683 --- /dev/null +++ b/distribution/docker/wolfi-docker-aarch64-export/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// export is done in the parent project. diff --git a/distribution/docker/wolfi-docker-export/build.gradle b/distribution/docker/wolfi-docker-export/build.gradle new file mode 100644 index 000000000000..537b5a093683 --- /dev/null +++ b/distribution/docker/wolfi-docker-export/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// export is done in the parent project. diff --git a/docs/changelog/107936.yaml b/docs/changelog/107936.yaml new file mode 100644 index 000000000000..89dd57f7a81a --- /dev/null +++ b/docs/changelog/107936.yaml @@ -0,0 +1,6 @@ +pr: 107936 +summary: Two empty mappings now are created equally +area: Mapping +type: bug +issues: + - 107031 diff --git a/docs/changelog/109193.yaml b/docs/changelog/109193.yaml new file mode 100644 index 000000000000..5cc664eaee2c --- /dev/null +++ b/docs/changelog/109193.yaml @@ -0,0 +1,6 @@ +pr: 109193 +summary: "[ES|QL] explicit cast a string literal to `date_period` and `time_duration`\ + \ in arithmetic operations" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/111972.yaml b/docs/changelog/111972.yaml index 58477c68f0e7..a5bfcd5b0882 100644 --- a/docs/changelog/111972.yaml +++ b/docs/changelog/111972.yaml @@ -5,11 +5,13 @@ type: feature issues: [] highlight: title: Add global retention in data stream lifecycle - body: "Data stream lifecycle now supports configuring retention on a cluster level,\ - \ namely global retention. 
Global retention \nallows us to configure two different\ - \ retentions:\n\n- `data_streams.lifecycle.retention.default` is applied to all\ - \ data streams managed by the data stream lifecycle that do not have retention\n\ - defined on the data stream level.\n- `data_streams.lifecycle.retention.max` is\ - \ applied to all data streams managed by the data stream lifecycle and it allows\ - \ any data stream \ndata to be deleted after the `max_retention` has passed." + body: |- + Data stream lifecycle now supports configuring retention on a cluster level, + namely global retention. Global retention \nallows us to configure two different + retentions: + + - `data_streams.lifecycle.retention.default` is applied to all data streams managed + by the data stream lifecycle that do not have retention defined on the data stream level. + - `data_streams.lifecycle.retention.max` is applied to all data streams managed by the + data stream lifecycle and it allows any data stream \ndata to be deleted after the `max_retention` has passed. notable: true diff --git a/docs/changelog/111981.yaml b/docs/changelog/111981.yaml new file mode 100644 index 000000000000..13b8fe4b7e38 --- /dev/null +++ b/docs/changelog/111981.yaml @@ -0,0 +1,6 @@ +pr: 111981 +summary: Allow fields with dots in sparse vector field mapper +area: Mapping +type: enhancement +issues: + - 109118 diff --git a/docs/changelog/112055.yaml b/docs/changelog/112055.yaml new file mode 100644 index 000000000000..cdf15b3b3746 --- /dev/null +++ b/docs/changelog/112055.yaml @@ -0,0 +1,6 @@ +pr: 112055 +summary: "ESQL: `mv_median_absolute_deviation` function" +area: ES|QL +type: feature +issues: + - 111590 diff --git a/docs/changelog/112282.yaml b/docs/changelog/112282.yaml new file mode 100644 index 000000000000..beea119b06ae --- /dev/null +++ b/docs/changelog/112282.yaml @@ -0,0 +1,6 @@ +pr: 112282 +summary: Adds example plugin for custom ingest processor +area: Ingest Node +type: enhancement +issues: + - 111539 diff --git a/docs/changelog/112294.yaml b/docs/changelog/112294.yaml new file mode 100644 index 000000000000..71ce9eeef584 --- /dev/null +++ b/docs/changelog/112294.yaml @@ -0,0 +1,8 @@ +pr: 112294 +summary: "Use fallback synthetic source for `copy_to` and doc_values: false cases" +area: Mapping +type: enhancement +issues: + - 110753 + - 110038 + - 109546 diff --git a/docs/changelog/112330.yaml b/docs/changelog/112330.yaml new file mode 100644 index 000000000000..498698f5175b --- /dev/null +++ b/docs/changelog/112330.yaml @@ -0,0 +1,5 @@ +pr: 112330 +summary: Add links to network disconnect troubleshooting +area: Network +type: enhancement +issues: [] diff --git a/docs/changelog/112337.yaml b/docs/changelog/112337.yaml new file mode 100644 index 000000000000..f7d667e23cfe --- /dev/null +++ b/docs/changelog/112337.yaml @@ -0,0 +1,5 @@ +pr: 112337 +summary: Add workaround for missing shard gen blob +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/112348.yaml b/docs/changelog/112348.yaml new file mode 100644 index 000000000000..84110a7cd4f1 --- /dev/null +++ b/docs/changelog/112348.yaml @@ -0,0 +1,6 @@ +pr: 112348 +summary: Introduce repository integrity verification API +area: Snapshot/Restore +type: enhancement +issues: + - 52622 diff --git a/docs/changelog/112350.yaml b/docs/changelog/112350.yaml new file mode 100644 index 000000000000..994cd3a65c63 --- /dev/null +++ b/docs/changelog/112350.yaml @@ -0,0 +1,5 @@ +pr: 112350 +summary: "[ESQL] Add `SPACE` function" +area: ES|QL +type: enhancement +issues: [] 
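The global retention highlight above introduces two cluster-level settings, `data_streams.lifecycle.retention.default` and `data_streams.lifecycle.retention.max`, alongside the per-stream data retention. Below is a hedged sketch of the resolution logic those entries describe; the class and method names are illustrative only, not the actual data stream lifecycle implementation.

import java.time.Duration;

final class EffectiveRetentionSketch {

    // Resolve the retention that actually applies to a managed data stream.
    // Any argument may be null, meaning "not configured".
    static Duration effectiveRetention(Duration dataRetention, Duration defaultRetention, Duration maxRetention) {
        // Stream-level data retention wins over the cluster-wide default.
        Duration candidate = dataRetention != null ? dataRetention : defaultRetention;
        // The cluster-wide maximum caps whatever was chosen, and applies
        // even when no other retention is configured at all.
        if (maxRetention != null && (candidate == null || candidate.compareTo(maxRetention) > 0)) {
            return maxRetention;
        }
        return candidate;
    }
}

For example, a stream with a 90-day data retention under a 30-day max resolves to 30 days, while a stream with no retention of its own picks up the cluster default.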
diff --git a/docs/changelog/112401.yaml b/docs/changelog/112401.yaml new file mode 100644 index 000000000000..65e9e76ac25f --- /dev/null +++ b/docs/changelog/112401.yaml @@ -0,0 +1,6 @@ +pr: 112401 +summary: "ESQL: Fix CASE when conditions are multivalued" +area: ES|QL +type: bug +issues: + - 112359 diff --git a/docs/changelog/112409.yaml b/docs/changelog/112409.yaml new file mode 100644 index 000000000000..bad94b9f5f2b --- /dev/null +++ b/docs/changelog/112409.yaml @@ -0,0 +1,6 @@ +pr: 112409 +summary: Include reason when no nodes are found +area: "Transform" +type: bug +issues: + - 112404 diff --git a/docs/changelog/112444.yaml b/docs/changelog/112444.yaml new file mode 100644 index 000000000000..bfa4fd693f0e --- /dev/null +++ b/docs/changelog/112444.yaml @@ -0,0 +1,6 @@ +pr: 112444 +summary: Full coverage of ECS by ecs@mappings when `date_detection` is disabled +area: Mapping +type: bug +issues: + - 112398 diff --git a/docs/changelog/112451.yaml b/docs/changelog/112451.yaml new file mode 100644 index 000000000000..aa852cf5e2a1 --- /dev/null +++ b/docs/changelog/112451.yaml @@ -0,0 +1,29 @@ +pr: 112451 +summary: Update data stream lifecycle telemetry to track global retention +area: Data streams +type: breaking +issues: [] +breaking: + title: Update data stream lifecycle telemetry to track global retention + area: REST API + details: |- + In this release we introduced global retention settings that fulfil the following criteria: + + - a data stream managed by the data stream lifecycle, + - a data stream that is not an internal data stream. + + As a result, we defined different types of retention: + + - **data retention**: the retention configured on data stream level by the data stream user or owner + - **default global retention:** the retention configured by an admin on a cluster level and applied to any + data stream that doesn't have data retention and fulfils the criteria. + - **max global retention:** the retention configured by an admin to guard against having long retention periods. + Any data stream that fulfills the criteria will adhere to the data retention unless it exceeds the max retention, + in which case the max global retention applies. + - **effective retention:** the retention that applies on the data stream that fulfill the criteria at a given moment + in time. It takes into consideration all the retention above and resolves it to the retention that will take effect. + + Considering the above changes, having a field named `retention` in the usage API was confusing. For this reason, we + renamed it to `data_retention` and added telemetry about the other configurations too. 
+ impact: Users that use the field `data_lifecycle.retention` should use the `data_lifecycle.data_retention` + notable: false diff --git a/docs/changelog/112547.yaml b/docs/changelog/112547.yaml new file mode 100644 index 000000000000..7f42f2a82976 --- /dev/null +++ b/docs/changelog/112547.yaml @@ -0,0 +1,5 @@ +pr: 112547 +summary: Remove reduce and `reduceContext` from `DelayedBucket` +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/112574.yaml b/docs/changelog/112574.yaml new file mode 100644 index 000000000000..3111697a8b97 --- /dev/null +++ b/docs/changelog/112574.yaml @@ -0,0 +1,5 @@ +pr: 112574 +summary: Add privileges required for CDR misconfiguration features to work on AWS SecurityHub integration +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/112581.yaml b/docs/changelog/112581.yaml new file mode 100644 index 000000000000..489b4780c06f --- /dev/null +++ b/docs/changelog/112581.yaml @@ -0,0 +1,5 @@ +pr: 112581 +summary: Fix missing header in `put_geoip_database` JSON spec +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/112610.yaml b/docs/changelog/112610.yaml new file mode 100644 index 000000000000..3d67a80a8f0b --- /dev/null +++ b/docs/changelog/112610.yaml @@ -0,0 +1,6 @@ +pr: 112610 +summary: Support widening of numeric types in union-types +area: ES|QL +type: bug +issues: + - 111277 diff --git a/docs/changelog/112612.yaml b/docs/changelog/112612.yaml new file mode 100644 index 000000000000..d6037e34ff17 --- /dev/null +++ b/docs/changelog/112612.yaml @@ -0,0 +1,5 @@ +pr: 112612 +summary: Set `replica_unassigned_buffer_time` in constructor +area: Health +type: bug +issues: [] diff --git a/docs/changelog/112649.yaml b/docs/changelog/112649.yaml new file mode 100644 index 000000000000..e3cf1e8e3488 --- /dev/null +++ b/docs/changelog/112649.yaml @@ -0,0 +1,5 @@ +pr: 112649 +summary: Allowlist `tracestate` header on remote server port +area: Security +type: bug +issues: [] diff --git a/docs/changelog/112687.yaml b/docs/changelog/112687.yaml new file mode 100644 index 000000000000..dd079e1b700c --- /dev/null +++ b/docs/changelog/112687.yaml @@ -0,0 +1,5 @@ +pr: 112687 +summary: Add `TaskManager` to `pluginServices` +area: Infra/Metrics +type: enhancement +issues: [] diff --git a/docs/changelog/112703.yaml b/docs/changelog/112703.yaml new file mode 100644 index 000000000000..a428e8c4e233 --- /dev/null +++ b/docs/changelog/112703.yaml @@ -0,0 +1,5 @@ +pr: 112703 +summary: JSON parse failures should be 4xx codes +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/112707.yaml b/docs/changelog/112707.yaml new file mode 100644 index 000000000000..9f16cfcd2b6f --- /dev/null +++ b/docs/changelog/112707.yaml @@ -0,0 +1,5 @@ +pr: 112707 +summary: Deduplicate `BucketOrder` when deserializing +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/112713.yaml b/docs/changelog/112713.yaml new file mode 100644 index 000000000000..1ccf451b13f8 --- /dev/null +++ b/docs/changelog/112713.yaml @@ -0,0 +1,5 @@ +pr: 112713 +summary: Fix encoding of dynamic arrays in ignored source +area: Logs +type: bug +issues: [] diff --git a/docs/changelog/112720.yaml b/docs/changelog/112720.yaml new file mode 100644 index 000000000000..a44ea5a69952 --- /dev/null +++ b/docs/changelog/112720.yaml @@ -0,0 +1,5 @@ +pr: 112720 +summary: Fix NPE in `dense_vector` stats +area: Vector Search +type: bug +issues: [] diff --git a/docs/plugins/development/creating-classic-plugins.asciidoc 
b/docs/plugins/development/creating-classic-plugins.asciidoc index f3f62a11f299..cc03ad51275f 100644 --- a/docs/plugins/development/creating-classic-plugins.asciidoc +++ b/docs/plugins/development/creating-classic-plugins.asciidoc @@ -32,12 +32,13 @@ for the plugin. If you need other resources, package them into a resources JAR. The {es} repository contains {es-repo}tree/main/plugins/examples[examples of plugins]. Some of these include: * a plugin with {es-repo}tree/main/plugins/examples/custom-settings[custom settings] +* a plugin with a {es-repo}tree/main/plugins/examples/custom-processor[custom ingest processor] * adding {es-repo}tree/main/plugins/examples/rest-handler[custom rest endpoints] * adding a {es-repo}tree/main/plugins/examples/rescore[custom rescorer] * a script {es-repo}tree/main/plugins/examples/script-expert-scoring[implemented in Java] These examples provide the bare bones needed to get started. For more -information about how to write a plugin, we recommend looking at the +information about how to write a plugin, we recommend looking at the {es-repo}tree/main/plugins/[source code of existing plugins] for inspiration. [discrete] @@ -88,4 +89,4 @@ for more information. [[plugin-descriptor-file-classic]] ==== The plugin descriptor file for classic plugins -include::plugin-descriptor-file.asciidoc[] \ No newline at end of file +include::plugin-descriptor-file.asciidoc[] diff --git a/docs/plugins/install_remove.asciidoc b/docs/plugins/install_remove.asciidoc index c9d163fb30ef..893af4dac42f 100644 --- a/docs/plugins/install_remove.asciidoc +++ b/docs/plugins/install_remove.asciidoc @@ -4,7 +4,7 @@ ifeval::["{release-state}"=="unreleased"] -WARNING: Version {version} of the Elastic Stack has not yet been released. +WARNING: Version {version} of the Elastic Stack has not yet been released. The plugin might not be available. endif::[] diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index fc5b01f9234e..5f329c00efd7 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -50,16 +50,16 @@ Valid columns are: (Default) IP address, such as `127.0.1.1`. `heap.percent`, `hp`, `heapPercent`:: -(Default) Maximum configured heap, such as `7`. +(Default) Used percentage of total allocated Elasticsearch JVM heap, such as `7`. This reflects only the {es} process running within the operating system and is the most direct indicator of its JVM/heap/memory resource performance. `heap.max`, `hm`, `heapMax`:: -(Default) Total heap, such as `4gb`. +Total heap, such as `4gb`. `ram.percent`, `rp`, `ramPercent`:: -(Default) Used total memory percentage, such as `47`. +(Default) Used percentage of total operating system's memory, such as `47`. This reflects all processes running on operating system instead of only {es} and is not guaranteed to correlate to its performance. `file_desc.percent`, `fdp`, `fileDescriptorPercent`:: -(Default) Used file descriptors percentage, such as `1`. +Used file descriptors percentage, such as `1`. `node.role`, `r`, `role`, `nodeRole`:: (Default) Roles of the node. @@ -138,16 +138,16 @@ Used file descriptors, such as `123`. Maximum number of file descriptors, such as `1024`. `cpu`:: -Recent system CPU usage as percent, such as `12`. +(Default) Recent system CPU usage as percent, such as `12`. `load_1m`, `l`:: -Most recent load average, such as `0.22`. +(Default) Most recent load average, such as `0.22`. `load_5m`, `l`:: -Load average for the last five minutes, such as `0.78`. 
+(Default) Load average for the last five minutes, such as `0.78`. `load_15m`, `l`:: -Load average for the last fifteen minutes, such as `1.24`. +(Default) Load average for the last fifteen minutes, such as `1.24`. `uptime`, `u`:: Node uptime, such as `17.3m`. diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index c39bc0dcd287..575a6457804a 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1307,6 +1307,142 @@ Each repository type may also include other statistics about the repositories of ==== +`ccs`:: +(object) Contains information relating to <> settings and activity in the cluster. ++ +.Properties of `ccs` +[%collapsible%open] +===== + + +`_search`::: +(object) Contains the telemetry information about the <> usage in the cluster. ++ +.Properties of `_search` +[%collapsible%open] +====== +`total`::: +(integer) The total number of {ccs} requests that have been executed by the cluster. + +`success`::: +(integer) The total number of {ccs} requests that have been successfully executed by the cluster. + +`skipped`::: +(integer) The total number of {ccs} requests (successful or failed) that had at least one remote cluster skipped. + +`took`::: +(object) Contains statistics about the time taken to execute {ccs} requests. ++ +.Properties of `took` +[%collapsible%open] +======= +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. + +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======= + +`took_mrt_true`:: +(object) Contains statistics about the time taken to execute {ccs} requests for which the +<> setting was set to `true`. ++ +.Properties of `took_mrt_true` +[%collapsible%open] +======= +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. + +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======= + +`took_mrt_false`:: +(object) Contains statistics about the time taken to execute {ccs} requests for which the +<> setting was set to `false`. ++ +.Properties of `took_mrt_false` +[%collapsible%open] +======= +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. + +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======= + +`remotes_per_search_max`:: +(integer) The maximum number of remote clusters that were queried in a single {ccs} request. + +`remotes_per_search_avg`:: +(float) The average number of remote clusters that were queried in a single {ccs} request. + +`failure_reasons`:: +(object) Contains statistics about the reasons for {ccs} request failures. +The keys are the failure reason names and the values are the number of requests that failed for that reason. + +`features`:: +(object) Contains statistics about the features used in {ccs} requests. The keys are the names of the search feature, +and the values are the number of requests that used that feature. Single request can use more than one feature +(e.g. both `async` and `wildcard`). Known features are: + +* `async` - <> + +* `mrt` - <> setting was set to `true`. 
+ +* `wildcard` - <> for indices with wildcards was used in the search request. + +`clients`:: +(object) Contains statistics about the clients that executed {ccs} requests. +The keys are the names of the clients, and the values are the number of requests that were executed by that client. +Only known clients (such as `kibana` or `elasticsearch`) are counted. + +`clusters`:: +(object) Contains statistics about the clusters that were queried in {ccs} requests. +The keys are cluster names, and the values are per-cluster telemetry data. +This also includes the local cluster itself, which uses the name `(local)`. ++ +.Properties of per-cluster data: +[%collapsible%open] +======= +`total`::: +(integer) The total number of successful (not skipped) {ccs} requests that were executed against this cluster. +This may include requests where partial results were returned, but not requests in which the cluster has been skipped entirely. + +`skipped`::: +(integer) The total number of {ccs} requests for which this cluster was skipped. + +`took`::: +(object) Contains statistics about the time taken to execute requests against this cluster. ++ +.Properties of `took` +[%collapsible%open] +======== +`max`::: +(integer) The maximum time taken to execute a {ccs} request, in milliseconds. + +`avg`::: +(integer) The median time taken to execute a {ccs} request, in milliseconds. + +`p90`::: +(integer) The 90th percentile of the time taken to execute {ccs} requests, in milliseconds. +======== + +======= + +====== + +===== + [[cluster-stats-api-example]] ==== {api-examples-title} @@ -1607,7 +1743,35 @@ The API returns the following response: }, "repositories": { ... - } + }, + "ccs": { + "_search": { + "total": 7, + "success": 7, + "skipped": 0, + "took": { + "max": 36, + "avg": 20, + "p90": 33 + }, + "took_mrt_true": { + "max": 33, + "avg": 15, + "p90": 33 + }, + "took_mrt_false": { + "max": 36, + "avg": 26, + "p90": 36 + }, + "remotes_per_search_max": 3, + "remotes_per_search_avg": 2.0, + "failure_reasons": { ... }, + "features": { ... }, + "clients": { ... }, + "clusters": { ... 
} + } + } } -------------------------------------------------- // TESTRESPONSE[s/"plugins": \[[^\]]*\]/"plugins": $body.$_path/] @@ -1618,10 +1782,15 @@ The API returns the following response: // TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/] // TESTRESPONSE[s/"snapshots": \{[^\}]*\}/"snapshots": $body.$_path/] // TESTRESPONSE[s/"repositories": \{[^\}]*\}/"repositories": $body.$_path/] +// TESTRESPONSE[s/"clusters": \{[^\}]*\}/"clusters": $body.$_path/] +// TESTRESPONSE[s/"features": \{[^\}]*\}/"features": $body.$_path/] +// TESTRESPONSE[s/"clients": \{[^\}]*\}/"clients": $body.$_path/] +// TESTRESPONSE[s/"failure_reasons": \{[^\}]*\}/"failure_reasons": $body.$_path/] // TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/] // TESTRESPONSE[s/"runtime_field_types": \[[^\]]*\]/"runtime_field_types": $body.$_path/] // TESTRESPONSE[s/"search": \{[^\}]*\}/"search": $body.$_path/] -// TESTRESPONSE[s/: true|false/: $body.$_path/] +// TESTRESPONSE[s/"remotes_per_search_avg": [.0-9]+/"remotes_per_search_avg": $body.$_path/] +// TESTRESPONSE[s/: (true|false)/: $body.$_path/] // TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] // These replacements do a few things: diff --git a/docs/reference/esql/functions/description/cosh.asciidoc b/docs/reference/esql/functions/description/cosh.asciidoc index bfe51f915287..ddace7da5434 100644 --- a/docs/reference/esql/functions/description/cosh.asciidoc +++ b/docs/reference/esql/functions/description/cosh.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns the {wikipedia}/Hyperbolic_functions[hyperbolic cosine] of an angle. +Returns the {wikipedia}/Hyperbolic_functions[hyperbolic cosine] of a number. diff --git a/docs/reference/esql/functions/description/mv_median_absolute_deviation.asciidoc b/docs/reference/esql/functions/description/mv_median_absolute_deviation.asciidoc new file mode 100644 index 000000000000..765c4d322c3d --- /dev/null +++ b/docs/reference/esql/functions/description/mv_median_absolute_deviation.asciidoc @@ -0,0 +1,7 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts a multivalued field into a single valued field containing the median absolute deviation. It is calculated as the median of each data point's deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`. + +NOTE: If the field has an even number of values, the medians will be calculated as the average of the middle two values. If the value is not a floating point number, the averages are rounded towards 0. diff --git a/docs/reference/esql/functions/description/sin.asciidoc b/docs/reference/esql/functions/description/sin.asciidoc index ba12ba88ca37..40f5a46d1863 100644 --- a/docs/reference/esql/functions/description/sin.asciidoc +++ b/docs/reference/esql/functions/description/sin.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns ths {wikipedia}/Sine_and_cosine[Sine] trigonometric function of an angle. +Returns the {wikipedia}/Sine_and_cosine[sine] of an angle. 
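The `mv_median_absolute_deviation` description above defines the result as `median(|median(X) - X|)`, with even-length medians taken as the average of the two middle values and, for non-floating-point types, rounded towards zero. Below is a short sketch of those documented semantics for `long` values; it illustrates the math only and is not the ESQL implementation (long overflow is ignored for clarity).

import java.util.Arrays;

final class MadSketch {

    static long median(long[] values) {
        long[] sorted = values.clone();
        Arrays.sort(sorted);
        int mid = sorted.length / 2;
        if (sorted.length % 2 == 1) {
            return sorted[mid];
        }
        // Even count: average the two middle values. Java's integer
        // division truncates towards zero, matching the documented
        // rounding for non-floating-point inputs.
        return (sorted[mid - 1] + sorted[mid]) / 2;
    }

    static long medianAbsoluteDeviation(long[] values) {
        long m = median(values);
        long[] deviations = new long[values.length];
        for (int i = 0; i < values.length; i++) {
            deviations[i] = Math.abs(values[i] - m);
        }
        // median(|median(X) - X|), per the description above.
        return median(deviations);
    }
}

For `{1, 2, 4, 8}` the median is `(2 + 4) / 2 = 3`, the deviations are `{2, 1, 1, 5}`, and their median is `(1 + 2) / 2 = 1` after truncation towards zero.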
diff --git a/docs/reference/esql/functions/description/sinh.asciidoc b/docs/reference/esql/functions/description/sinh.asciidoc index bb7761e2a025..be7aee68f593 100644 --- a/docs/reference/esql/functions/description/sinh.asciidoc +++ b/docs/reference/esql/functions/description/sinh.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of an angle. +Returns the {wikipedia}/Hyperbolic_functions[hyperbolic sine] of a number. diff --git a/docs/reference/esql/functions/description/space.asciidoc b/docs/reference/esql/functions/description/space.asciidoc new file mode 100644 index 000000000000..ee01da64f590 --- /dev/null +++ b/docs/reference/esql/functions/description/space.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Returns a string made of `number` spaces. diff --git a/docs/reference/esql/functions/description/tan.asciidoc b/docs/reference/esql/functions/description/tan.asciidoc index 925bebf044a7..dae37126f0ad 100644 --- a/docs/reference/esql/functions/description/tan.asciidoc +++ b/docs/reference/esql/functions/description/tan.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns the {wikipedia}/Sine_and_cosine[Tangent] trigonometric function of an angle. +Returns the {wikipedia}/Sine_and_cosine[tangent] of an angle. diff --git a/docs/reference/esql/functions/description/tanh.asciidoc b/docs/reference/esql/functions/description/tanh.asciidoc index 7ee5e457dfe4..42c73a7536dc 100644 --- a/docs/reference/esql/functions/description/tanh.asciidoc +++ b/docs/reference/esql/functions/description/tanh.asciidoc @@ -2,4 +2,4 @@ *Description* -Returns the {wikipedia}/Hyperbolic_functions[Tangent] hyperbolic function of an angle. +Returns the {wikipedia}/Hyperbolic_functions[hyperbolic tangent] of a number. diff --git a/docs/reference/esql/functions/description/to_dateperiod.asciidoc b/docs/reference/esql/functions/description/to_dateperiod.asciidoc new file mode 100644 index 000000000000..443e377bf51c --- /dev/null +++ b/docs/reference/esql/functions/description/to_dateperiod.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts an input value into a `date_period` value. diff --git a/docs/reference/esql/functions/description/to_timeduration.asciidoc b/docs/reference/esql/functions/description/to_timeduration.asciidoc new file mode 100644 index 000000000000..87c405a98ff6 --- /dev/null +++ b/docs/reference/esql/functions/description/to_timeduration.asciidoc @@ -0,0 +1,5 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Description* + +Converts an input value into a `time_duration` value. 
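`TO_DATEPERIOD` and `TO_TIMEDURATION` above convert to two deliberately different temporal types: `date_period` holds calendar units (roughly days and larger), while `time_duration` holds fixed-length units (roughly hours and smaller). The sketch below uses the closest `java.time` analogues purely to illustrate why the distinction matters around a DST transition; the mapping to `Period` and `Duration` is an analogy, not how ESQL represents these types.

import java.time.Duration;
import java.time.Period;
import java.time.ZonedDateTime;

public class PeriodVsDurationSketch {
    public static void main(String[] args) {
        // Midnight before the 2024 US spring-forward transition.
        ZonedDateTime start = ZonedDateTime.parse("2024-03-10T00:00-05:00[America/New_York]");

        // Calendar arithmetic ("one day"): lands on midnight the next day,
        // even though that day is only 23 hours long.
        System.out.println(start.plus(Period.ofDays(1)));     // midnight on 2024-03-11, EDT

        // Fixed-length arithmetic ("24 hours"): the wall-clock time shifts
        // by the hour lost to DST.
        System.out.println(start.plus(Duration.ofHours(24))); // 01:00 on 2024-03-11, EDT
    }
}

The same split is why the earlier changelog entry calls out explicit casts of string literals to `date_period` and `time_duration` in arithmetic: the two types interact differently with a timestamp.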
diff --git a/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc index 20891126c20f..9084c008e890 100644 --- a/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc +++ b/docs/reference/esql/functions/examples/median_absolute_deviation.asciidoc @@ -4,19 +4,19 @@ [source.merge.styled,esql] ---- -include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation] +include::{esql-specs}/median_absolute_deviation.csv-spec[tag=median-absolute-deviation] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/stats_percentile.csv-spec[tag=median-absolute-deviation-result] +include::{esql-specs}/median_absolute_deviation.csv-spec[tag=median-absolute-deviation-result] |=== The expression can use inline functions. For example, to calculate the the median absolute deviation of the maximum values of a multivalued column, first use `MV_MAX` to get the maximum value per row, and use the result with the `MEDIAN_ABSOLUTE_DEVIATION` function [source.merge.styled,esql] ---- -include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression] +include::{esql-specs}/median_absolute_deviation.csv-spec[tag=docsStatsMADNestedExpression] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/stats_percentile.csv-spec[tag=docsStatsMADNestedExpression-result] +include::{esql-specs}/median_absolute_deviation.csv-spec[tag=docsStatsMADNestedExpression-result] |=== diff --git a/docs/reference/esql/functions/examples/mv_median_absolute_deviation.asciidoc b/docs/reference/esql/functions/examples/mv_median_absolute_deviation.asciidoc new file mode 100644 index 000000000000..b36bc18a8017 --- /dev/null +++ b/docs/reference/esql/functions/examples/mv_median_absolute_deviation.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/mv_median_absolute_deviation.csv-spec[tag=example] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/mv_median_absolute_deviation.csv-spec[tag=example-result] +|=== + diff --git a/docs/reference/esql/functions/examples/space.asciidoc b/docs/reference/esql/functions/examples/space.asciidoc new file mode 100644 index 000000000000..cef3cd613902 --- /dev/null +++ b/docs/reference/esql/functions/examples/space.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=space] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=space-result] +|=== + diff --git a/docs/reference/esql/functions/examples/to_dateperiod.asciidoc b/docs/reference/esql/functions/examples/to_dateperiod.asciidoc new file mode 100644 index 000000000000..91272b33b45e --- /dev/null +++ b/docs/reference/esql/functions/examples/to_dateperiod.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/convert.csv-spec[tag=castToDatePeriod] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/convert.csv-spec[tag=castToDatePeriod-result] +|=== + diff --git a/docs/reference/esql/functions/examples/to_timeduration.asciidoc b/docs/reference/esql/functions/examples/to_timeduration.asciidoc new file mode 100644 index 000000000000..7e62a39bbe3e --- /dev/null +++ b/docs/reference/esql/functions/examples/to_timeduration.asciidoc @@ -0,0 +1,13 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/convert.csv-spec[tag=castToTimeDuration] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/convert.csv-spec[tag=castToTimeDuration-result] +|=== + diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 27705cd3897f..ab10460f48b2 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -22,6 +22,30 @@ "variadic" : true, "returnType" : "boolean" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "boolean", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "boolean", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "boolean" + }, { "params" : [ { @@ -40,6 +64,90 @@ "variadic" : true, "returnType" : "cartesian_point" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "cartesian_point", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "cartesian_point", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "cartesian_point" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "cartesian_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "cartesian_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "cartesian_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." 
+ }, + { + "name" : "elseValue", + "type" : "cartesian_shape", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "cartesian_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "date", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "date" + }, { "params" : [ { @@ -53,6 +161,12 @@ "type" : "date", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "date", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -76,6 +190,30 @@ "variadic" : true, "returnType" : "double" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "double", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "double", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "double" + }, { "params" : [ { @@ -94,6 +232,90 @@ "variadic" : true, "returnType" : "geo_point" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "geo_point", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "geo_point", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "geo_point" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "geo_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "geo_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "geo_shape", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "geo_shape", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." 
+ } + ], + "variadic" : true, + "returnType" : "geo_shape" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "integer", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "integer" + }, { "params" : [ { @@ -107,6 +329,12 @@ "type" : "integer", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "integer", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -130,6 +358,30 @@ "variadic" : true, "returnType" : "ip" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "ip", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "ip", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "ip" + }, { "params" : [ { @@ -143,12 +395,30 @@ "type" : "keyword", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." }, { - "name" : "falseValue", + "name" : "trueValue", "type" : "keyword", - "optional" : true, + "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "keyword", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -172,6 +442,30 @@ "variadic" : true, "returnType" : "long" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "long", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "long", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "long" + }, { "params" : [ { @@ -190,6 +484,48 @@ "variadic" : true, "returnType" : "text" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "text", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. 
The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "text", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "text" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "unsigned_long", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + } + ], + "variadic" : true, + "returnType" : "unsigned_long" + }, { "params" : [ { @@ -203,6 +539,12 @@ "type" : "unsigned_long", "optional" : false, "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "unsigned_long", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." } ], "variadic" : true, @@ -225,6 +567,30 @@ ], "variadic" : true, "returnType" : "version" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "version", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "version", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "version" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/cosh.json b/docs/reference/esql/functions/kibana/definition/cosh.json index a34eee15be37..dca261d971c4 100644 --- a/docs/reference/esql/functions/kibana/definition/cosh.json +++ b/docs/reference/esql/functions/kibana/definition/cosh.json @@ -2,15 +2,15 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "cosh", - "description" : "Returns the hyperbolic cosine of an angle.", + "description" : "Returns the hyperbolic cosine of a number.", "signatures" : [ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "double", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -19,10 +19,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "integer", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -31,10 +31,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "long", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -43,10 +43,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "unsigned_long", "optional" : false, - "description" : "An angle, in radians. 
If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/mv_median_absolute_deviation.json b/docs/reference/esql/functions/kibana/definition/mv_median_absolute_deviation.json new file mode 100644 index 000000000000..d6f1174a4e25 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/mv_median_absolute_deviation.json @@ -0,0 +1,60 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "mv_median_absolute_deviation", + "description" : "Converts a multivalued field into a single valued field containing the median absolute deviation.\n\nIt is calculated as the median of each data point's deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`.", + "note" : "If the field has an even number of values, the medians will be calculated as the average of the middle two values. If the value is not a floating point number, the averages are rounded towards 0.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "double", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "double" + }, + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "integer" + }, + { + "params" : [ + { + "name" : "number", + "type" : "long", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "long" + }, + { + "params" : [ + { + "name" : "number", + "type" : "unsigned_long", + "optional" : false, + "description" : "Multivalue expression." + } + ], + "variadic" : false, + "returnType" : "unsigned_long" + } + ], + "examples" : [ + "ROW values = [0, 2, 5, 6]\n| EVAL median_absolute_deviation = MV_MEDIAN_ABSOLUTE_DEVIATION(values), median = MV_MEDIAN(values)" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/sin.json b/docs/reference/esql/functions/kibana/definition/sin.json index 8d092bd0c15a..ce46fa66a2ac 100644 --- a/docs/reference/esql/functions/kibana/definition/sin.json +++ b/docs/reference/esql/functions/kibana/definition/sin.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "sin", - "description" : "Returns ths Sine trigonometric function of an angle.", + "description" : "Returns the sine of an angle.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/sinh.json b/docs/reference/esql/functions/kibana/definition/sinh.json index 2261b18134f6..e773e95e5e9e 100644 --- a/docs/reference/esql/functions/kibana/definition/sinh.json +++ b/docs/reference/esql/functions/kibana/definition/sinh.json @@ -2,15 +2,15 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", "type" : "eval", "name" : "sinh", - "description" : "Returns the hyperbolic sine of an angle.", + "description" : "Returns the hyperbolic sine of a number.", "signatures" : [ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "double", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -19,10 +19,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "integer", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -31,10 +31,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "long", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -43,10 +43,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "unsigned_long", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/space.json b/docs/reference/esql/functions/kibana/definition/space.json new file mode 100644 index 000000000000..acf7466284d3 --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/space.json @@ -0,0 +1,23 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "space", + "description" : "Returns a string made of `number` spaces.", + "signatures" : [ + { + "params" : [ + { + "name" : "number", + "type" : "integer", + "optional" : false, + "description" : "Number of spaces in result." + } + ], + "variadic" : false, + "returnType" : "keyword" + } + ], + "examples" : [ + "ROW message = CONCAT(\"Hello\", SPACE(1), \"World!\");" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/tan.json b/docs/reference/esql/functions/kibana/definition/tan.json index 7498964dc1a2..f5452f310a99 100644 --- a/docs/reference/esql/functions/kibana/definition/tan.json +++ b/docs/reference/esql/functions/kibana/definition/tan.json @@ -2,7 +2,7 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", "type" : "eval", "name" : "tan", - "description" : "Returns the Tangent trigonometric function of an angle.", + "description" : "Returns the tangent of an angle.", "signatures" : [ { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/tanh.json b/docs/reference/esql/functions/kibana/definition/tanh.json index 507f62d394be..081d606b6421 100644 --- a/docs/reference/esql/functions/kibana/definition/tanh.json +++ b/docs/reference/esql/functions/kibana/definition/tanh.json @@ -2,15 +2,15 @@ "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.", "type" : "eval", "name" : "tanh", - "description" : "Returns the Tangent hyperbolic function of an angle.", + "description" : "Returns the hyperbolic tangent of a number.", "signatures" : [ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "double", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -19,10 +19,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "integer", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -31,10 +31,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "long", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, @@ -43,10 +43,10 @@ { "params" : [ { - "name" : "angle", + "name" : "number", "type" : "unsigned_long", "optional" : false, - "description" : "An angle, in radians. If `null`, the function returns `null`." + "description" : "Numeric expression. If `null`, the function returns `null`." } ], "variadic" : false, diff --git a/docs/reference/esql/functions/kibana/definition/to_dateperiod.json b/docs/reference/esql/functions/kibana/definition/to_dateperiod.json new file mode 100644 index 000000000000..dc9176f4cc0b --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/to_dateperiod.json @@ -0,0 +1,47 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.", + "type" : "eval", + "name" : "to_dateperiod", + "description" : "Converts an input value into a `date_period` value.", + "signatures" : [ + { + "params" : [ + { + "name" : "field", + "type" : "date_period", + "optional" : false, + "description" : "Input value. The input is a valid constant date period expression." + } + ], + "variadic" : false, + "returnType" : "date_period" + }, + { + "params" : [ + { + "name" : "field", + "type" : "keyword", + "optional" : false, + "description" : "Input value. The input is a valid constant date period expression." + } + ], + "variadic" : false, + "returnType" : "date_period" + }, + { + "params" : [ + { + "name" : "field", + "type" : "text", + "optional" : false, + "description" : "Input value. The input is a valid constant date period expression." + } + ], + "variadic" : false, + "returnType" : "date_period" + } + ], + "examples" : [ + "row x = \"2024-01-01\"::datetime | eval y = x + \"3 DAYS\"::date_period, z = x - to_dateperiod(\"3 days\");" + ] +} diff --git a/docs/reference/esql/functions/kibana/definition/to_timeduration.json b/docs/reference/esql/functions/kibana/definition/to_timeduration.json new file mode 100644 index 000000000000..039de323044e --- /dev/null +++ b/docs/reference/esql/functions/kibana/definition/to_timeduration.json @@ -0,0 +1,47 @@ +{ + "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. 
See ../README.md for how to regenerate it.",
+  "type" : "eval",
+  "name" : "to_timeduration",
+  "description" : "Converts an input value into a `time_duration` value.",
+  "signatures" : [
+    {
+      "params" : [
+        {
+          "name" : "field",
+          "type" : "keyword",
+          "optional" : false,
+          "description" : "Input value. The input is a valid constant time duration expression."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "time_duration"
+    },
+    {
+      "params" : [
+        {
+          "name" : "field",
+          "type" : "text",
+          "optional" : false,
+          "description" : "Input value. The input is a valid constant time duration expression."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "time_duration"
+    },
+    {
+      "params" : [
+        {
+          "name" : "field",
+          "type" : "time_duration",
+          "optional" : false,
+          "description" : "Input value. The input is a valid constant time duration expression."
+        }
+      ],
+      "variadic" : false,
+      "returnType" : "time_duration"
+    }
+  ],
+  "examples" : [
+    "row x = \"2024-01-01\"::datetime | eval y = x + \"3 hours\"::time_duration, z = x - to_timeduration(\"3 hours\");"
+  ]
+}
diff --git a/docs/reference/esql/functions/kibana/docs/cosh.md b/docs/reference/esql/functions/kibana/docs/cosh.md
index d5cc126650e4..033842952178 100644
--- a/docs/reference/esql/functions/kibana/docs/cosh.md
+++ b/docs/reference/esql/functions/kibana/docs/cosh.md
@@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### COSH
-Returns the hyperbolic cosine of an angle.
+Returns the hyperbolic cosine of a number.
```
ROW a=1.8
diff --git a/docs/reference/esql/functions/kibana/docs/mv_median_absolute_deviation.md b/docs/reference/esql/functions/kibana/docs/mv_median_absolute_deviation.md
new file mode 100644
index 000000000000..191ce3ce60ae
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/docs/mv_median_absolute_deviation.md
@@ -0,0 +1,14 @@
+<!--
+This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+-->
+
+### MV_MEDIAN_ABSOLUTE_DEVIATION
+Converts a multivalued field into a single valued field containing the median absolute deviation.
+
+It is calculated as the median of each data point's deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`.
+
+```
+ROW values = [0, 2, 5, 6]
+| EVAL median_absolute_deviation = MV_MEDIAN_ABSOLUTE_DEVIATION(values), median = MV_MEDIAN(values)
+```
+Note: If the field has an even number of values, the medians will be calculated as the average of the middle two values. If the value is not a floating point number, the averages are rounded towards 0.
diff --git a/docs/reference/esql/functions/kibana/docs/sin.md b/docs/reference/esql/functions/kibana/docs/sin.md
index 1e1fc5ee9c93..d1128350d12f 100644
--- a/docs/reference/esql/functions/kibana/docs/sin.md
+++ b/docs/reference/esql/functions/kibana/docs/sin.md
@@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### SIN
-Returns ths Sine trigonometric function of an angle.
+Returns the sine of an angle.
```
ROW a=1.8
diff --git a/docs/reference/esql/functions/kibana/docs/sinh.md b/docs/reference/esql/functions/kibana/docs/sinh.md
index 886b3b95b09f..249c9bb0906c 100644
--- a/docs/reference/esql/functions/kibana/docs/sinh.md
+++ b/docs/reference/esql/functions/kibana/docs/sinh.md
@@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### SINH
-Returns the hyperbolic sine of an angle.
+Returns the hyperbolic sine of a number.
```
ROW a=1.8
diff --git a/docs/reference/esql/functions/kibana/docs/space.md b/docs/reference/esql/functions/kibana/docs/space.md
new file mode 100644
index 000000000000..3112bf953dd6
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/docs/space.md
@@ -0,0 +1,10 @@
+<!--
+This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+-->
+
+### SPACE
+Returns a string made of `number` spaces.
+
+```
+ROW message = CONCAT("Hello", SPACE(1), "World!");
+```
diff --git a/docs/reference/esql/functions/kibana/docs/tan.md b/docs/reference/esql/functions/kibana/docs/tan.md
index f1594f4de747..41bd3c814b17 100644
--- a/docs/reference/esql/functions/kibana/docs/tan.md
+++ b/docs/reference/esql/functions/kibana/docs/tan.md
@@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### TAN
-Returns the Tangent trigonometric function of an angle.
+Returns the tangent of an angle.
```
ROW a=1.8
diff --git a/docs/reference/esql/functions/kibana/docs/tanh.md b/docs/reference/esql/functions/kibana/docs/tanh.md
index c4a70dec00ba..365add190de8 100644
--- a/docs/reference/esql/functions/kibana/docs/tanh.md
+++ b/docs/reference/esql/functions/kibana/docs/tanh.md
@@ -3,7 +3,7 @@ This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../READ
-->
### TANH
-Returns the Tangent hyperbolic function of an angle.
+Returns the hyperbolic tangent of a number.
```
ROW a=1.8
diff --git a/docs/reference/esql/functions/kibana/docs/to_dateperiod.md b/docs/reference/esql/functions/kibana/docs/to_dateperiod.md
new file mode 100644
index 000000000000..adbbe7578305
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/docs/to_dateperiod.md
@@ -0,0 +1,10 @@
+<!--
+This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+-->
+
+### TO_DATEPERIOD
+Converts an input value into a `date_period` value.
+
+```
+row x = "2024-01-01"::datetime | eval y = x + "3 DAYS"::date_period, z = x - to_dateperiod("3 days");
+```
diff --git a/docs/reference/esql/functions/kibana/docs/to_timeduration.md b/docs/reference/esql/functions/kibana/docs/to_timeduration.md
new file mode 100644
index 000000000000..52e32ba97d11
--- /dev/null
+++ b/docs/reference/esql/functions/kibana/docs/to_timeduration.md
@@ -0,0 +1,10 @@
+<!--
+This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+-->
+
+### TO_TIMEDURATION
+Converts an input value into a `time_duration` value.
+
+```
+row x = "2024-01-01"::datetime | eval y = x + "3 hours"::time_duration, z = x - to_timeduration("3 hours");
+```
diff --git a/docs/reference/esql/functions/kibana/inline_cast.json b/docs/reference/esql/functions/kibana/inline_cast.json
index f71572d3d651..f1aa283c52e9 100644
--- a/docs/reference/esql/functions/kibana/inline_cast.json
+++ b/docs/reference/esql/functions/kibana/inline_cast.json
@@ -3,6 +3,7 @@
  "boolean" : "to_boolean",
  "cartesian_point" : "to_cartesianpoint",
  "cartesian_shape" : "to_cartesianshape",
+  "date_period" : "to_dateperiod",
  "datetime" : "to_datetime",
  "double" : "to_double",
  "geo_point" : "to_geopoint",
@@ -14,6 +15,7 @@
  "long" : "to_long",
  "string" : "to_string",
  "text" : "to_string",
+  "time_duration" : "to_timeduration",
  "unsigned_long" : "to_unsigned_long",
  "version" : "to_version"
}
\ No newline at end of file
diff --git a/docs/reference/esql/functions/layout/mv_median_absolute_deviation.asciidoc b/docs/reference/esql/functions/layout/mv_median_absolute_deviation.asciidoc
new file mode 100644
index 000000000000..b594d589e610
--- /dev/null
+++ b/docs/reference/esql/functions/layout/mv_median_absolute_deviation.asciidoc
@@ -0,0 +1,15 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+ +[discrete] +[[esql-mv_median_absolute_deviation]] +=== `MV_MEDIAN_ABSOLUTE_DEVIATION` + +*Syntax* + +[.text-center] +image::esql/functions/signature/mv_median_absolute_deviation.svg[Embedded,opts=inline] + +include::../parameters/mv_median_absolute_deviation.asciidoc[] +include::../description/mv_median_absolute_deviation.asciidoc[] +include::../types/mv_median_absolute_deviation.asciidoc[] +include::../examples/mv_median_absolute_deviation.asciidoc[] diff --git a/docs/reference/esql/functions/layout/space.asciidoc b/docs/reference/esql/functions/layout/space.asciidoc new file mode 100644 index 000000000000..22355d1e2497 --- /dev/null +++ b/docs/reference/esql/functions/layout/space.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-space]] +=== `SPACE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/space.svg[Embedded,opts=inline] + +include::../parameters/space.asciidoc[] +include::../description/space.asciidoc[] +include::../types/space.asciidoc[] +include::../examples/space.asciidoc[] diff --git a/docs/reference/esql/functions/layout/to_dateperiod.asciidoc b/docs/reference/esql/functions/layout/to_dateperiod.asciidoc new file mode 100644 index 000000000000..0345c1a6680c --- /dev/null +++ b/docs/reference/esql/functions/layout/to_dateperiod.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +[discrete] +[[esql-to_dateperiod]] +=== `TO_DATEPERIOD` + +*Syntax* + +[.text-center] +image::esql/functions/signature/to_dateperiod.svg[Embedded,opts=inline] + +include::../parameters/to_dateperiod.asciidoc[] +include::../description/to_dateperiod.asciidoc[] +include::../types/to_dateperiod.asciidoc[] +include::../examples/to_dateperiod.asciidoc[] diff --git a/docs/reference/esql/functions/layout/to_timeduration.asciidoc b/docs/reference/esql/functions/layout/to_timeduration.asciidoc new file mode 100644 index 000000000000..bed4743c730a --- /dev/null +++ b/docs/reference/esql/functions/layout/to_timeduration.asciidoc @@ -0,0 +1,15 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+ +[discrete] +[[esql-to_timeduration]] +=== `TO_TIMEDURATION` + +*Syntax* + +[.text-center] +image::esql/functions/signature/to_timeduration.svg[Embedded,opts=inline] + +include::../parameters/to_timeduration.asciidoc[] +include::../description/to_timeduration.asciidoc[] +include::../types/to_timeduration.asciidoc[] +include::../examples/to_timeduration.asciidoc[] diff --git a/docs/reference/esql/functions/mv-functions.asciidoc b/docs/reference/esql/functions/mv-functions.asciidoc index bd5f14cdd355..4093e44c1691 100644 --- a/docs/reference/esql/functions/mv-functions.asciidoc +++ b/docs/reference/esql/functions/mv-functions.asciidoc @@ -17,6 +17,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -34,6 +35,7 @@ include::layout/mv_first.asciidoc[] include::layout/mv_last.asciidoc[] include::layout/mv_max.asciidoc[] include::layout/mv_median.asciidoc[] +include::layout/mv_median_absolute_deviation.asciidoc[] include::layout/mv_min.asciidoc[] include::layout/mv_pseries_weighted_sum.asciidoc[] include::layout/mv_slice.asciidoc[] diff --git a/docs/reference/esql/functions/parameters/case.asciidoc b/docs/reference/esql/functions/parameters/case.asciidoc index ee6f7e499b3b..f12eade4d578 100644 --- a/docs/reference/esql/functions/parameters/case.asciidoc +++ b/docs/reference/esql/functions/parameters/case.asciidoc @@ -7,3 +7,6 @@ A condition. `trueValue`:: The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches. + +`elseValue`:: +The value that's returned when no condition evaluates to `true`. diff --git a/docs/reference/esql/functions/parameters/cosh.asciidoc b/docs/reference/esql/functions/parameters/cosh.asciidoc index a1c3f7edf30c..65013f4c2126 100644 --- a/docs/reference/esql/functions/parameters/cosh.asciidoc +++ b/docs/reference/esql/functions/parameters/cosh.asciidoc @@ -2,5 +2,5 @@ *Parameters* -`angle`:: -An angle, in radians. If `null`, the function returns `null`. +`number`:: +Numeric expression. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/mv_median_absolute_deviation.asciidoc b/docs/reference/esql/functions/parameters/mv_median_absolute_deviation.asciidoc new file mode 100644 index 000000000000..47859c7e2b32 --- /dev/null +++ b/docs/reference/esql/functions/parameters/mv_median_absolute_deviation.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: +Multivalue expression. diff --git a/docs/reference/esql/functions/parameters/sinh.asciidoc b/docs/reference/esql/functions/parameters/sinh.asciidoc index a1c3f7edf30c..65013f4c2126 100644 --- a/docs/reference/esql/functions/parameters/sinh.asciidoc +++ b/docs/reference/esql/functions/parameters/sinh.asciidoc @@ -2,5 +2,5 @@ *Parameters* -`angle`:: -An angle, in radians. If `null`, the function returns `null`. +`number`:: +Numeric expression. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/space.asciidoc b/docs/reference/esql/functions/parameters/space.asciidoc new file mode 100644 index 000000000000..de4efd34c0ba --- /dev/null +++ b/docs/reference/esql/functions/parameters/space.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`number`:: +Number of spaces in result. 
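The `elseValue` parameter documented above changes how a `CASE` call reads: with an odd number of arguments, the final argument is the default value returned when no condition matches. A minimal ES|QL sketch of the new signature (the `employees` index and `height` field are hypothetical, for illustration only):

[source,esql]
----
FROM employees
// the trailing "short" argument is the elseValue, returned when no condition matches
| EVAL height_class = CASE(height >= 1.85, "tall", height >= 1.70, "medium", "short")
| KEEP height, height_class
----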
diff --git a/docs/reference/esql/functions/parameters/tanh.asciidoc b/docs/reference/esql/functions/parameters/tanh.asciidoc index a1c3f7edf30c..65013f4c2126 100644 --- a/docs/reference/esql/functions/parameters/tanh.asciidoc +++ b/docs/reference/esql/functions/parameters/tanh.asciidoc @@ -2,5 +2,5 @@ *Parameters* -`angle`:: -An angle, in radians. If `null`, the function returns `null`. +`number`:: +Numeric expression. If `null`, the function returns `null`. diff --git a/docs/reference/esql/functions/parameters/to_dateperiod.asciidoc b/docs/reference/esql/functions/parameters/to_dateperiod.asciidoc new file mode 100644 index 000000000000..1e5ed14cf44a --- /dev/null +++ b/docs/reference/esql/functions/parameters/to_dateperiod.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +Input value. The input is a valid constant date period expression. diff --git a/docs/reference/esql/functions/parameters/to_timeduration.asciidoc b/docs/reference/esql/functions/parameters/to_timeduration.asciidoc new file mode 100644 index 000000000000..0289dc37dbfe --- /dev/null +++ b/docs/reference/esql/functions/parameters/to_timeduration.asciidoc @@ -0,0 +1,6 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Parameters* + +`field`:: +Input value. The input is a valid constant time duration expression. diff --git a/docs/reference/esql/functions/signature/case.svg b/docs/reference/esql/functions/signature/case.svg index d6fd7da38aca..0d51a0647627 100644 --- a/docs/reference/esql/functions/signature/case.svg +++ b/docs/reference/esql/functions/signature/case.svg @@ -1 +1 @@ -CASE(condition,trueValue) \ No newline at end of file +CASE(condition,trueValueelseValue) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/cosh.svg b/docs/reference/esql/functions/signature/cosh.svg index 11b14d922929..9b9eddd3cb80 100644 --- a/docs/reference/esql/functions/signature/cosh.svg +++ b/docs/reference/esql/functions/signature/cosh.svg @@ -1 +1 @@ -COSH(angle) \ No newline at end of file +COSH(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mv_median_absolute_deviation.svg b/docs/reference/esql/functions/signature/mv_median_absolute_deviation.svg new file mode 100644 index 000000000000..7d8a131a9101 --- /dev/null +++ b/docs/reference/esql/functions/signature/mv_median_absolute_deviation.svg @@ -0,0 +1 @@ +MV_MEDIAN_ABSOLUTE_DEVIATION(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sinh.svg b/docs/reference/esql/functions/signature/sinh.svg index 0bb4ac31dee3..16e7ddb6b653 100644 --- a/docs/reference/esql/functions/signature/sinh.svg +++ b/docs/reference/esql/functions/signature/sinh.svg @@ -1 +1 @@ -SINH(angle) \ No newline at end of file +SINH(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/space.svg b/docs/reference/esql/functions/signature/space.svg new file mode 100644 index 000000000000..c506c25dfcb1 --- /dev/null +++ b/docs/reference/esql/functions/signature/space.svg @@ -0,0 +1 @@ +SPACE(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/tanh.svg b/docs/reference/esql/functions/signature/tanh.svg index f7b968f8b30c..c2edfe2d6942 100644 --- a/docs/reference/esql/functions/signature/tanh.svg +++ b/docs/reference/esql/functions/signature/tanh.svg @@ -1 
+1 @@ -TANH(angle) \ No newline at end of file +TANH(number) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_dateperiod.svg b/docs/reference/esql/functions/signature/to_dateperiod.svg new file mode 100644 index 000000000000..302a9ee3bfa6 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_dateperiod.svg @@ -0,0 +1 @@ +TO_DATEPERIOD(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_timeduration.svg b/docs/reference/esql/functions/signature/to_timeduration.svg new file mode 100644 index 000000000000..b237441b3b40 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_timeduration.svg @@ -0,0 +1 @@ +TO_TIMEDURATION(field) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc b/docs/reference/esql/functions/string-functions.asciidoc index d4b120ad1c45..ed97769b900e 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -19,6 +19,7 @@ * <> * <> * <> +* <> * <> * <> * <> @@ -39,6 +40,7 @@ include::layout/repeat.asciidoc[] include::layout/replace.asciidoc[] include::layout/right.asciidoc[] include::layout/rtrim.asciidoc[] +include::layout/space.asciidoc[] include::layout/split.asciidoc[] include::layout/starts_with.asciidoc[] include::layout/substring.asciidoc[] diff --git a/docs/reference/esql/functions/type-conversion-functions.asciidoc b/docs/reference/esql/functions/type-conversion-functions.asciidoc index 96c29a776bc2..9ac9ec290c07 100644 --- a/docs/reference/esql/functions/type-conversion-functions.asciidoc +++ b/docs/reference/esql/functions/type-conversion-functions.asciidoc @@ -16,6 +16,7 @@ * <> * <> * <> +* experimental:[] <> * <> * <> * <> @@ -26,6 +27,7 @@ * <> * <> * <> +* experimental:[] <> * experimental:[] <> * <> // end::type_list[] @@ -33,6 +35,7 @@ include::layout/to_boolean.asciidoc[] include::layout/to_cartesianpoint.asciidoc[] include::layout/to_cartesianshape.asciidoc[] +include::layout/to_dateperiod.asciidoc[] include::layout/to_datetime.asciidoc[] include::layout/to_degrees.asciidoc[] include::layout/to_double.asciidoc[] @@ -43,5 +46,6 @@ include::layout/to_ip.asciidoc[] include::layout/to_long.asciidoc[] include::layout/to_radians.asciidoc[] include::layout/to_string.asciidoc[] +include::layout/to_timeduration.asciidoc[] include::layout/to_unsigned_long.asciidoc[] include::layout/to_version.asciidoc[] diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index f6c8cfe9361d..e8aa3eaf5daa 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -4,16 +4,33 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -condition | trueValue | result -boolean | boolean | boolean -boolean | cartesian_point | cartesian_point -boolean | date | date -boolean | double | double -boolean | geo_point | geo_point -boolean | integer | integer -boolean | ip | ip -boolean | long | long -boolean | text | text -boolean | unsigned_long | unsigned_long -boolean | version | version +condition | trueValue | elseValue | result +boolean | boolean | boolean | boolean +boolean | boolean | | boolean +boolean | cartesian_point | cartesian_point | cartesian_point +boolean | cartesian_point | | cartesian_point +boolean | cartesian_shape | cartesian_shape | cartesian_shape +boolean | cartesian_shape | | cartesian_shape +boolean | date | date | date +boolean | date | 
| date +boolean | double | double | double +boolean | double | | double +boolean | geo_point | geo_point | geo_point +boolean | geo_point | | geo_point +boolean | geo_shape | geo_shape | geo_shape +boolean | geo_shape | | geo_shape +boolean | integer | integer | integer +boolean | integer | | integer +boolean | ip | ip | ip +boolean | ip | | ip +boolean | keyword | keyword | keyword +boolean | keyword | | keyword +boolean | long | long | long +boolean | long | | long +boolean | text | text | text +boolean | text | | text +boolean | unsigned_long | unsigned_long | unsigned_long +boolean | unsigned_long | | unsigned_long +boolean | version | version | version +boolean | version | | version |=== diff --git a/docs/reference/esql/functions/types/cosh.asciidoc b/docs/reference/esql/functions/types/cosh.asciidoc index d96a34b67853..7cda278abdb5 100644 --- a/docs/reference/esql/functions/types/cosh.asciidoc +++ b/docs/reference/esql/functions/types/cosh.asciidoc @@ -4,7 +4,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -angle | result +number | result double | double integer | double long | double diff --git a/docs/reference/esql/functions/types/mv_median_absolute_deviation.asciidoc b/docs/reference/esql/functions/types/mv_median_absolute_deviation.asciidoc new file mode 100644 index 000000000000..d81bbf36ae3f --- /dev/null +++ b/docs/reference/esql/functions/types/mv_median_absolute_deviation.asciidoc @@ -0,0 +1,12 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | result +double | double +integer | integer +long | long +unsigned_long | unsigned_long +|=== diff --git a/docs/reference/esql/functions/types/sinh.asciidoc b/docs/reference/esql/functions/types/sinh.asciidoc index d96a34b67853..7cda278abdb5 100644 --- a/docs/reference/esql/functions/types/sinh.asciidoc +++ b/docs/reference/esql/functions/types/sinh.asciidoc @@ -4,7 +4,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -angle | result +number | result double | double integer | double long | double diff --git a/docs/reference/esql/functions/types/space.asciidoc b/docs/reference/esql/functions/types/space.asciidoc new file mode 100644 index 000000000000..3f2e89f80d3e --- /dev/null +++ b/docs/reference/esql/functions/types/space.asciidoc @@ -0,0 +1,9 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. + +*Supported types* + +[%header.monospaced.styled,format=dsv,separator=|] +|=== +number | result +integer | keyword +|=== diff --git a/docs/reference/esql/functions/types/tanh.asciidoc b/docs/reference/esql/functions/types/tanh.asciidoc index d96a34b67853..7cda278abdb5 100644 --- a/docs/reference/esql/functions/types/tanh.asciidoc +++ b/docs/reference/esql/functions/types/tanh.asciidoc @@ -4,7 +4,7 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -angle | result +number | result double | double integer | double long | double diff --git a/docs/reference/esql/functions/types/to_dateperiod.asciidoc b/docs/reference/esql/functions/types/to_dateperiod.asciidoc new file mode 100644 index 000000000000..1bbc33fe3ca7 --- /dev/null +++ b/docs/reference/esql/functions/types/to_dateperiod.asciidoc @@ -0,0 +1,11 @@ +// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it. 
+
+*Supported types*
+
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+field | result
+date_period | date_period
+keyword | date_period
+text | date_period
+|===
diff --git a/docs/reference/esql/functions/types/to_timeduration.asciidoc b/docs/reference/esql/functions/types/to_timeduration.asciidoc
new file mode 100644
index 000000000000..b82a5bb4f9f8
--- /dev/null
+++ b/docs/reference/esql/functions/types/to_timeduration.asciidoc
@@ -0,0 +1,11 @@
+// This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.
+
+*Supported types*
+
+[%header.monospaced.styled,format=dsv,separator=|]
+|===
+field | result
+keyword | time_duration
+text | time_duration
+time_duration | time_duration
+|===
diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc
index afc3b6556c67..51a01b5b7c7e 100644
--- a/docs/reference/index-modules/similarity.asciidoc
+++ b/docs/reference/index-modules/similarity.asciidoc
@@ -5,6 +5,8 @@
A similarity (scoring / ranking model) defines how matching documents are scored. Similarity is per field, meaning that via the mapping one can define a different similarity per field.
+Similarity is only applicable to `text` and `keyword` fields.
+
Configuring a custom similarity is considered an expert feature and the builtin similarities are most likely sufficient as is described in <>.
diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc
index 52631bc0956b..0032c7b46bfb 100644
--- a/docs/reference/index-modules/translog.asciidoc
+++ b/docs/reference/index-modules/translog.asciidoc
@@ -19,7 +19,8 @@
An {es} <> is the process of performing a Lucene commit and starting a new translog generation. Flushes are performed automatically in the background in order to make sure the translog does not grow too large, which would make replaying its operations take a considerable amount of time during
-recovery. The ability to perform a flush manually is also exposed through an
+recovery. The translog size will never exceed `1%` of the disk size.
+The ability to perform a flush manually is also exposed through an
API, although this is rarely needed.

[discrete]
@@ -71,7 +72,8 @@ update, or bulk request. This setting accepts the following parameters:
The translog stores all operations that are not yet safely persisted in Lucene (i.e., are not part of a Lucene commit point). Although these operations are available for reads, they will need to be replayed if the shard was stopped
-  and had to be recovered. This setting controls the maximum total size of these
-  operations, to prevent recoveries from taking too long. Once the maximum size
-  has been reached a flush will happen, generating a new Lucene commit point.
-  Defaults to `512mb`.
+  and had to be recovered.
+  This setting controls the maximum total size of these operations to prevent
+  recoveries from taking too long. Once the maximum size has been reached, a flush
+  will happen, generating a new Lucene commit point. Defaults to `10 GB`.
+
diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc
index ba26a563541f..b106e2c4a08f 100644
--- a/docs/reference/inference/put-inference.asciidoc
+++ b/docs/reference/inference/put-inference.asciidoc
@@ -31,27 +31,46 @@ use the <>.
* Requires the `manage_inference` <> (the built-in `inference_admin` role grants this privilege)

+[discrete]
+[[put-inference-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=inference-id]
+
+``::
+(Required, string)
+include::inference-shared.asciidoc[tag=task-type]
++
+--
+Refer to the service list in the <> for the available task types.
+--
+
+
[discrete]
[[put-inference-api-desc]]
==== {api-description-title}

The create {infer} API enables you to create an {infer} endpoint and configure a {ml} model to perform a specific {infer} task.

-The following services are available through the {infer} API, click the links to review the configuration details of the services:
-
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
-* <> (for built-in models and models uploaded through Eland)
-* <>
-* <>
-* <>
-* <>
-* <>
-* <>
+The following services are available through the {infer} API.
+You can find the available task types next to the service name.
+Click the links to review the configuration details of the services:
+
+* <> (`rerank`, `sparse_embedding`, `text_embedding`)
+* <> (`completion`, `text_embedding`)
+* <> (`completion`)
+* <> (`completion`, `text_embedding`)
+* <> (`completion`, `text_embedding`)
+* <> (`completion`, `rerank`, `text_embedding`)
+* <> (`rerank`, `sparse_embedding`, `text_embedding` - this service is for built-in models and models uploaded through Eland)
+* <> (`sparse_embedding`)
+* <> (`completion`, `text_embedding`)
+* <> (`rerank`, `text_embedding`)
+* <> (`text_embedding`)
+* <> (`text_embedding`)
+* <> (`completion`, `text_embedding`)

The {es} and ELSER services run on a {ml} node in your {es} cluster. The rest of the services connect to external providers.
\ No newline at end of file
diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc
index 982da1fe17f7..c942959d34e5 100644
--- a/docs/reference/ingest/processors/inference.asciidoc
+++ b/docs/reference/ingest/processors/inference.asciidoc
@@ -31,6 +31,7 @@ include::common-options.asciidoc[]
`field_map` fields. For NLP models, use the `input_output` option. For {dfanalytics} models, use the `target_field` and `field_map` option.
* Each {infer} input field must be a single string, not an array of strings.
+* The `input_field` is processed as-is and ignores any <>'s <> at {infer} run time.
==================================================

[discrete]
diff --git a/docs/reference/intro.asciidoc b/docs/reference/intro.asciidoc
index 3ad5a9bd71c0..f80856368af2 100644
--- a/docs/reference/intro.asciidoc
+++ b/docs/reference/intro.asciidoc
@@ -14,12 +14,12 @@ Use {es} to search, index, store, and analyze data of all shapes and sizes in ne
{es} is used for a wide and growing range of use cases. Here are a few examples:

-* *Monitor log and event data*. Store logs, metrics, and event data for observability and security information and event management (SIEM).
-* *Build search applications*. Add search capabilities to apps or websites, or build enterprise search engines over your organization's internal data sources.
-* *Vector database*. Store and search vectorized data, and create vector embeddings with built-in and third-party natural language processing (NLP) models.
-* *Retrieval augmented generation (RAG)*. Use {es} as a retrieval engine to augment Generative AI models.
-* *Application and security monitoring*. Monitor and analyze application performance and security data effectively.
-* *Machine learning*. Use {ml} to automatically model the behavior of your data in real-time. +* *Monitor log and event data*: Store logs, metrics, and event data for observability and security information and event management (SIEM). +* *Build search applications*: Add search capabilities to apps or websites, or build search engines over internal data. +* *Vector database*: Store and search vectorized data, and create vector embeddings with built-in and third-party natural language processing (NLP) models. +* *Retrieval augmented generation (RAG)*: Use {es} as a retrieval engine to augment generative AI models. +* *Application and security monitoring*: Monitor and analyze application performance and security data. +* *Machine learning*: Use {ml} to automatically model the behavior of your data in real-time. This is just a sample of search, observability, and security use cases enabled by {es}. Refer to our https://www.elastic.co/customers/success-stories[customer success stories] for concrete examples across a range of industries. @@ -41,25 +41,25 @@ https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its To use {es}, you need a running instance of the {es} service. You can deploy {es} in various ways: -* <>. Get started quickly with a minimal local Docker setup. -* {cloud}/ec-getting-started-trial.html[*Elastic Cloud*]. {es} is available as part of our hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14 day free trial]. -* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]. Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14 day free trial]. +* <>: Get started quickly with a minimal local Docker setup. +* {cloud}/ec-getting-started-trial.html[*Elastic Cloud*]: {es} is available as part of our hosted Elastic Stack offering, deployed in the cloud with your provider of choice. Sign up for a https://cloud.elastic.co/registration[14-day free trial]. +* {serverless-docs}/general/sign-up-trial[*Elastic Cloud Serverless* (technical preview)]: Create serverless projects for autoscaled and fully managed {es} deployments. Sign up for a https://cloud.elastic.co/serverless-registration[14-day free trial]. **Advanced deployment options** -* <>. Install, configure, and run {es} on your own premises. -* {ece-ref}/Elastic-Cloud-Enterprise-overview.html[*Elastic Cloud Enterprise*]. Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises. -* {eck-ref}/k8s-overview.html[*Elastic Cloud on Kubernetes*]. Deploy Elastic Cloud on Kubernetes. +* <>: Install, configure, and run {es} on your own premises. +* {ece-ref}/Elastic-Cloud-Enterprise-overview.html[*Elastic Cloud Enterprise*]: Deploy Elastic Cloud on public or private clouds, virtual machines, or your own premises. +* {eck-ref}/k8s-overview.html[*Elastic Cloud on Kubernetes*]: Deploy Elastic Cloud on Kubernetes. [discrete] [[elasticsearch-next-steps]] === Learn more -Some resources to help you get started: +Here are some resources to help you get started: -* <>. A beginner's guide to deploying your first {es} instance, indexing data, and running queries. -* https://elastic.co/webinars/getting-started-elasticsearch[Webinar: Introduction to {es}]. Register for our live webinars to learn directly from {es} experts. -* https://www.elastic.co/search-labs[Elastic Search Labs]. 
Tutorials and blogs that explore AI-powered search using the latest {es} features. +* <>: A beginner's guide to deploying your first {es} instance, indexing data, and running queries. +* https://elastic.co/webinars/getting-started-elasticsearch[Webinar: Introduction to {es}]: Register for our live webinars to learn directly from {es} experts. +* https://www.elastic.co/search-labs[Elastic Search Labs]: Tutorials and blogs that explore AI-powered search using the latest {es} features. ** Follow our tutorial https://www.elastic.co/search-labs/tutorials/search-tutorial/welcome[to build a hybrid search solution in Python]. ** Check out the https://github.com/elastic/elasticsearch-labs?tab=readme-ov-file#elasticsearch-examples--apps[`elasticsearch-labs` repository] for a range of Python notebooks and apps for various use cases. @@ -133,9 +133,9 @@ In {es}, metadata fields are prefixed with an underscore. The most important metadata fields are: -* `_source`. Contains the original JSON document. -* `_index`. The name of the index where the document is stored. -* `_id`. The document's ID. IDs must be unique per index. +* `_source`: Contains the original JSON document. +* `_index`: The name of the index where the document is stored. +* `_id`: The document's ID. IDs must be unique per index. [discrete] [[elasticsearch-intro-documents-fields-mappings]] @@ -146,8 +146,8 @@ A mapping defines the <> for each field, how the field and how it should be stored. When adding documents to {es}, you have two options for mappings: -* <>. Let {es} automatically detect the data types and create the mappings for you. This is great for getting started quickly. -* <>. Define the mappings up front by specifying data types for each field. Recommended for production use cases. +* <>: Let {es} automatically detect the data types and create the mappings for you. This is great for getting started quickly, but can lead to unexpected results for complex data. +* <>: Define the mappings up front by specifying data types for each field. Recommended for production use cases, because you have much more control over how your data is indexed. [TIP] ==== diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 192f581f28d7..239614345d78 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -33,10 +33,13 @@ mapping values by overriding values in the mapping during the search request. [discrete] [[mapping-dynamic]] == Dynamic mapping -<> allows you to experiment with -and explore data when you’re just getting started. {es} adds new fields -automatically, just by indexing a document. You can add fields to the top-level -mapping, and to inner <> and <> fields. + +When you use <>, {es} automatically +attempts to detect the data type of fields in your documents. This allows +you to get started quickly by just adding data to an index. If you index +additional documents with new fields, {es} will add these fields automatically. +You can add fields to the top-level mapping, and to inner <> +and <> fields. Use <> to define custom mappings that are applied to dynamically added fields based on the matching condition. @@ -44,14 +47,28 @@ applied to dynamically added fields based on the matching condition. [discrete] [[mapping-explicit]] == Explicit mapping -<> allows you to precisely choose how to -define the mapping definition, such as: - -* Which string fields should be treated as full text fields. -* Which fields contain numbers, dates, or geolocations. -* The <> of date values. 
-* Custom rules to control the mapping for
-  <>.
+
+Use <> to define exactly how data types
+are mapped to fields, customized to your specific use case.
+
+Defining your own mappings enables you to:
+
+* Define which string fields should be treated as full-text fields.
+* Define which fields contain numbers, dates, or geolocations.
+* Use data types that cannot be automatically detected (such as `geo_point` and `geo_shape`).
+* Choose date value <>, including custom date formats.
+* Create custom rules to control the mapping for <>.
+* Optimize fields for partial matching.
+* Perform language-specific text analysis.
+
+[TIP]
+====
+It’s often useful to index the same field in different ways for different purposes.
+For example, you might want to index a string field as both a text field for full-text
+search and as a keyword field for sorting or aggregating your data. Or, you might
+choose to use more than one language analyzer to process the contents of a string field
+that contains user input.
+====

Use <> to make schema changes without reindexing. You can
use runtime fields in conjunction with indexed fields to
diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc
index d08da2cfc1d2..fc7b6831ca84 100644
--- a/docs/reference/modules/transport.asciidoc
+++ b/docs/reference/modules/transport.asciidoc
@@ -185,16 +185,18 @@ configured, and defaults otherwise to `transport.tcp.reuse_address`.

A transport connection between two nodes is made up of a number of long-lived
TCP connections, some of which may be idle for an extended period of time.
-Nonetheless, Elasticsearch requires these connections to remain open, and it
-can disrupt the operation of your cluster if any inter-node connections are
-closed by an external influence such as a firewall. It is important to
-configure your network to preserve long-lived idle connections between
-Elasticsearch nodes, for instance by leaving `*.tcp.keep_alive` enabled and
-ensuring that the keepalive interval is shorter than any timeout that might
-cause idle connections to be closed, or by setting `transport.ping_schedule` if
-keepalives cannot be configured. Devices which drop connections when they reach
-a certain age are a common source of problems to Elasticsearch clusters, and
-must not be used.
+Nonetheless, {es} requires these connections to remain open, and it can disrupt
+the operation of your cluster if any inter-node connections are closed by an
+external influence such as a firewall. It is important to configure your network
+to preserve long-lived idle connections between {es} nodes, for instance by
+leaving `*.tcp.keep_alive` enabled and ensuring that the keepalive interval is
+shorter than any timeout that might cause idle connections to be closed, or by
+setting `transport.ping_schedule` if keepalives cannot be configured. Devices
+which drop connections when they reach a certain age are a common source of
+problems for {es} clusters, and must not be used.
+
+For information about troubleshooting unexpected network disconnections, see
+<>.
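+
+As an illustrative sketch only (the values are assumptions for a hypothetical
+environment with a 10-minute firewall idle timeout, not recommendations), the
+relevant settings might look like this in `elasticsearch.yml`:
+
+[source,yaml]
+----
+# Keep long-lived idle inter-node connections open (enabled by default).
+transport.tcp.keep_alive: true
+# On platforms that support these options, send the first keepalive probe
+# after 5 minutes of idleness and repeat every minute thereafter, so probes
+# fire well before the assumed 10-minute firewall timeout closes the socket.
+transport.tcp.keep_idle: 300
+transport.tcp.keep_interval: 60
+# If OS-level keepalives cannot be configured, schedule application-level
+# pings instead.
+# transport.ping_schedule: 5s
+----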
[[request-compression]]
===== Request compression
diff --git a/docs/reference/quickstart/run-elasticsearch-locally.asciidoc b/docs/reference/quickstart/run-elasticsearch-locally.asciidoc
index 8c75510ae860..24e0f3f22350 100644
--- a/docs/reference/quickstart/run-elasticsearch-locally.asciidoc
+++ b/docs/reference/quickstart/run-elasticsearch-locally.asciidoc
@@ -114,6 +114,8 @@ docker run -p 127.0.0.1:5601:5601 -d --name kibana --network elastic-net \
  {kib-docker-image}
----

+When you access {kib}, use `elastic` as the username and the password you set earlier for the `ELASTIC_PASSWORD` environment variable.
+
[NOTE]
====
The service is started with a trial license. The trial license enables all features of Elasticsearch for a trial period of 30 days. After the trial period expires, the license is downgraded to a basic license, which is free forever. If you prefer to skip the trial and use the basic license, set the value of the `xpack.license.self_generated.type` variable to basic instead. For a detailed feature comparison between the different licenses, refer to our https://www.elastic.co/subscriptions[subscriptions page].
diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc
index e10240a66fbb..a54dbe21b46c 100644
--- a/docs/reference/rest-api/usage.asciidoc
+++ b/docs/reference/rest-api/usage.asciidoc
@@ -377,10 +377,19 @@ GET /_xpack/usage
        "enabled": true,
        "count": 0,
        "default_rollover_used": true,
-        "retention": {
-          "minimum_millis": 0,
-          "maximum_millis": 0,
-          "average_millis": 0.0
+        "data_retention": {
+          "configured_data_streams": 0
+        },
+        "effective_retention": {
+          "retained_data_streams": 0
+        },
+        "global_retention": {
+          "default": {
+            "defined": false
+          },
+          "max": {
+            "defined": false
+          }
        }
      },
      "data_tiers" : {
diff --git a/docs/reference/search/search-your-data/semantic-search-deploy-model.asciidoc b/docs/reference/search/search-your-data/semantic-search-deploy-model.asciidoc
new file mode 100644
index 000000000000..6c610159ae0b
--- /dev/null
+++ b/docs/reference/search/search-your-data/semantic-search-deploy-model.asciidoc
@@ -0,0 +1,97 @@
+[[semantic-search-deployed-nlp-model]]
+=== Tutorial: semantic search with a deployed model
+
+++++
+Semantic search with deployed model
+++++
+
+[IMPORTANT]
+====
+* For the easiest way to perform semantic search in the {stack}, refer to the <> end-to-end tutorial.
+* This tutorial was written before the <> and <> were introduced.
+Today we have simpler options for performing semantic search.
+====
+
+This guide shows you how to implement semantic search with models deployed in {es}: from selecting an NLP model to writing queries.
+
+
+[discrete]
+[[deployed-select-nlp-model]]
+==== Select an NLP model
+
+{es} offers the usage of a {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-embedding[wide range of NLP models], including both dense and sparse vector models.
+Your choice of the language model is critical for implementing semantic search successfully.
+
+While it is possible to bring your own text embedding model, achieving good search results through model tuning is challenging.
+Selecting an appropriate model from our third-party model list is the first step.
+Training the model on your own data is essential to ensure better search results than using only BM25.
+However, the model training process requires a team of data scientists and ML experts, making it expensive and time-consuming.
+
+To address this issue, Elastic provides a pre-trained representational model called {ml-docs}/ml-nlp-elser.html[Elastic Learned Sparse EncodeR (ELSER)].
+ELSER, currently available only for English, is an out-of-domain sparse vector model that does not require fine-tuning.
+This adaptability makes it suitable for various NLP use cases out of the box.
+Unless you have a team of ML specialists, it is highly recommended to use the ELSER model.
+
+In the case of sparse vector representation, the vectors mostly consist of zero values, with only a small subset containing non-zero values.
+This representation is commonly used for textual data.
+In the case of ELSER, each document in an index and the query text itself are represented by high-dimensional sparse vectors.
+Each non-zero element of the vector corresponds to a term in the model vocabulary.
+The ELSER vocabulary contains around 30,000 terms, so the sparse vectors created by ELSER contain about 30,000 values, the majority of which are zero.
+Effectively, the ELSER model replaces the terms in the original query with other terms that it has learned (from a training dataset) to occur in the documents that best match the original search terms, along with weights that control how important each replacement term is.
+
+
+[discrete]
+[[deployed-deploy-nlp-model]]
+==== Deploy the model
+
+After you decide which model you want to use for implementing semantic search, you need to deploy the model in {es}.
+
+include::{es-ref-dir}/tab-widgets/semantic-search/deploy-nlp-model-widget.asciidoc[]
+
+
+[discrete]
+[[deployed-field-mappings]]
+==== Map a field for the text embeddings
+
+Before you start using the deployed model to generate embeddings from your input text, you need to prepare your index mapping.
+The mapping of the index depends on the type of model.
+
+include::{es-ref-dir}/tab-widgets/semantic-search/field-mappings-widget.asciidoc[]
+
+
+[discrete]
+[[deployed-generate-embeddings]]
+==== Generate text embeddings
+
+Once you have created the mappings for the index, you can generate text embeddings from your input text.
+This can be done by using an
+<> with an <>.
+The ingest pipeline processes the input data and indexes it into the destination index.
+At index time, the inference ingest processor uses the trained model to infer against the data ingested through the pipeline.
+After you create the ingest pipeline with the inference processor, you can ingest your data through it to generate the model output.
+
+include::{es-ref-dir}/tab-widgets/semantic-search/generate-embeddings-widget.asciidoc[]
+
+Now it is time to perform semantic search!
+
+
+[discrete]
+[[deployed-search]]
+==== Search the data
+
+Depending on the type of model you have deployed, you can query rank features with a <> query, or dense vectors with a kNN search.
+
+include::{es-ref-dir}/tab-widgets/semantic-search/search-widget.asciidoc[]
+
+
+[discrete]
+[[deployed-hybrid-search]]
+==== Beyond semantic search with hybrid search
+
+In some situations, lexical search may perform better than semantic search, for example when searching for single words or IDs such as product numbers.
+
+Combining semantic and lexical search into one hybrid search request using <> provides the best of both worlds.
+Not only that, but hybrid search using reciprocal rank fusion {blog-ref}improving-information-retrieval-elastic-stack-hybrid[has been shown to perform better in general].
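+
+As a rough sketch of what such a request can look like (the index name
+`my-index`, the `content` text field, and the three-dimensional `content_vector`
+field are hypothetical placeholders; real dense vector fields usually have
+hundreds of dimensions), a lexical query and a kNN search can be combined with
+reciprocal rank fusion using the `rrf` retriever:
+
+[source,console]
+----
+GET my-index/_search
+{
+  "retriever": {
+    "rrf": {
+      "retrievers": [
+        {
+          "standard": {
+            "query": { "match": { "content": "how to avoid muscle soreness" } }
+          }
+        },
+        {
+          "knn": {
+            "field": "content_vector",
+            "query_vector": [0.12, -0.45, 0.91],
+            "k": 10,
+            "num_candidates": 50
+          }
+        }
+      ]
+    }
+  }
+}
+----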
+ +include::{es-ref-dir}/tab-widgets/semantic-search/hybrid-search-widget.asciidoc[] \ No newline at end of file diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc index fa84c3848b78..62e41b3eef3d 100644 --- a/docs/reference/search/search-your-data/semantic-search.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search.asciidoc @@ -7,109 +7,93 @@ Semantic search is a search method that helps you find data based on the intent Using an NLP model enables you to extract text embeddings out of text. Embeddings are vectors that provide a numeric representation of a text. Pieces of content with similar meaning have similar representations. -NLP models can be used in the {stack} various ways, you can: -* deploy models in {es} -* use the <> (recommended) -* use the <> +You have several options for using NLP models in the {stack}: +* use the `semantic_text` workflow (recommended) +* use the {infer} API workflow +* deploy models directly in {es} -[[semantic-search-diagram]] -.A simplified representation of encoding textual concepts as vectors -image::images/search/vector-search-oversimplification.png[A simplified representation of encoding textual concepts as vectors,align="center"] +Refer to <> to choose your workflow. -At query time, {es} can use the same NLP model to convert a query into embeddings, enabling you to find documents with similar text embeddings. +You can also store your own embeddings in {es} as vectors. +Refer to <> for guidance on which query type to use for semantic search. -This guide shows you how to implement semantic search with {es}: From selecting an NLP model, to writing queries. +At query time, {es} can use the same NLP model to convert a query into embeddings, enabling you to find documents with similar text embeddings. -IMPORTANT: For the easiest way to perform semantic search in the {stack}, refer to the <> end-to-end tutorial. [discrete] -[[semantic-search-select-nlp-model]] -=== Select an NLP model - -{es} offers the usage of a -{ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-embedding[wide range of NLP models], including both dense and sparse vector models. -Your choice of the language model is critical for implementing semantic search successfully. - -While it is possible to bring your own text embedding model, achieving good search results through model tuning is challenging. -Selecting an appropriate model from our third-party model list is the first step. -Training the model on your own data is essential to ensure better search results than using only BM25. -However, the model training process requires a team of data scientists and ML experts, making it expensive and time-consuming. - -To address this issue, Elastic provides a pre-trained representational model called {ml-docs}/ml-nlp-elser.html[Elastic Learned Sparse EncodeR (ELSER)]. -ELSER, currently available only for English, is an out-of-domain sparse vector model that does not require fine-tuning. -This adaptability makes it suitable for various NLP use cases out of the box. -Unless you have a team of ML specialists, it is highly recommended to use the ELSER model. - -In the case of sparse vector representation, the vectors mostly consist of zero values, with only a small subset containing non-zero values. -This representation is commonly used for textual data. -In the case of ELSER, each document in an index and the query text itself are represented by high-dimensional sparse vectors. 
-Each non-zero element of the vector corresponds to a term in the model vocabulary.
-The ELSER vocabulary contains around 30000 terms, so the sparse vectors created by ELSER contain about 30000 values, the majority of which are zero.
-Effectively the ELSER model is replacing the terms in the original query with other terms that have been learnt to exist in the documents that best match the original search terms in a training dataset, and weights to control how important each is.
+[[using-nlp-models]]
+=== Choose a semantic search workflow

[discrete]
-[[semantic-search-deploy-nlp-model]]
-=== Deploy the model
+==== `semantic_text` workflow

-After you decide which model you want to use for implementing semantic search, you need to deploy the model in {es}.
+The simplest way to use NLP models in the {stack} is through the <>.
+We recommend using this approach because it abstracts away a lot of manual work.
+All you need to do is create an {infer} endpoint and an index mapping to start ingesting, embedding, and querying data.
+There is no need to define model-related settings and parameters, or to create {infer} ingest pipelines.
+Refer to the <> documentation for a list of supported services.

-include::{es-ref-dir}/tab-widgets/semantic-search/deploy-nlp-model-widget.asciidoc[]
+The <> tutorial shows you the process end-to-end, and a minimal sketch of this workflow is shown at the end of this section.

[discrete]
-[[semantic-search-field-mappings]]
-=== Map a field for the text embeddings
+==== {infer} API workflow

-Before you start using the deployed model to generate embeddings based on your input text, you need to prepare your index mapping first.
-The mapping of the index depends on the type of model.
+The <> is more complex but offers greater control over the {infer} endpoint configuration.
+You need to create an {infer} endpoint, provide various model-related settings and parameters, define an index mapping, and set up an {infer} ingest pipeline with the appropriate settings.

-include::{es-ref-dir}/tab-widgets/semantic-search/field-mappings-widget.asciidoc[]
+The <> tutorial shows you the process end-to-end.

[discrete]
-[[semantic-search-generate-embeddings]]
-=== Generate text embeddings
+==== Model deployment workflow

-Once you have created the mappings for the index, you can generate text embeddings from your input text.
-This can be done by using an
-<> with an <>.
-The ingest pipeline processes the input data and indexes it into the destination index.
-At index time, the inference ingest processor uses the trained model to infer against the data ingested through the pipeline.
-After you created the ingest pipeline with the inference processor, you can ingest your data through it to generate the model output.
+You can also deploy NLP models in {es} manually, without using an {infer} endpoint.
+This is the most complex and labor-intensive workflow for performing semantic search in the {stack}.
+You need to select an NLP model from the {ml-docs}/ml-nlp-model-ref.html#ml-nlp-model-ref-text-embedding[list of supported dense and sparse vector models], deploy it using the Eland client, create an index mapping, and set up a suitable ingest pipeline to start ingesting and querying data.

-include::{es-ref-dir}/tab-widgets/semantic-search/generate-embeddings-widget.asciidoc[]
+The <> tutorial shows you the process end-to-end.

-Now it is time to perform semantic search!
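+
+As a minimal sketch of the recommended `semantic_text` workflow described above
+(the {infer} endpoint name `my-elser-endpoint` and the index and field names are
+hypothetical placeholders), the entire setup before you start ingesting and
+querying data can be as small as this:
+
+[source,console]
+----
+PUT _inference/sparse_embedding/my-elser-endpoint
+{
+  "service": "elser",
+  "service_settings": {
+    "num_allocations": 1,
+    "num_threads": 1
+  }
+}
+----
+
+[source,console]
+----
+PUT my-index
+{
+  "mappings": {
+    "properties": {
+      "content": {
+        "type": "semantic_text",
+        "inference_id": "my-elser-endpoint"
+      }
+    }
+  }
+}
+----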
[discrete]
-[[semantic-search-search]]
-=== Search the data
+[[using-query]]
+=== Using the right query

-Depending on the type of model you have deployed, you can query rank features with a <> query, or dense vectors with a kNN search.
+Crafting the right query is crucial for semantic search.
+Which query you use and which field you target in your queries depends on your chosen workflow.
+If you're using the `semantic_text` workflow, the choice is simple.
+If not, it depends on which type of embeddings you're working with.

-include::{es-ref-dir}/tab-widgets/semantic-search/search-widget.asciidoc[]
+[cols="30%, 30%, 40%", options="header"]
+|=======================================================================================================================================================================================================
+| Field type to query | Query to use | Notes
+| <> | <> | The `semantic_text` field handles generating embeddings for you at index time and query time.
+| <> | <> | The `sparse_vector` query can generate query embeddings for you, but you can also provide your own. You must provide embeddings at index time.
+| <> | <> | The `knn` query can generate query embeddings for you, but you can also provide your own. You must provide embeddings at index time.
+|=======================================================================================================================================================================================================

-[discrete]
-[[semantic-search-hybrid-search]]
-=== Beyond semantic search with hybrid search
+If you want {es} to generate embeddings at both index and query time, use the `semantic_text` field and the `semantic` query.
+If you want to bring your own embeddings, use the `sparse_vector` or `dense_vector` field type and the associated query, depending on the NLP model you used to generate the embeddings.

-In some situations, lexical search may perform better than semantic search.
-For example, when searching for single words or IDs, like product numbers.
-
-Combining semantic and lexical search into one hybrid search request using
-<> provides the best of both worlds.
-Not only that, but hybrid search using reciprocal rank fusion {blog-ref}improving-information-retrieval-elastic-stack-hybrid[has been shown to perform better in general].
+IMPORTANT: For the easiest way to perform semantic search in the {stack}, refer to the <> end-to-end tutorial.
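+
+For example, a `semantic` query against a hypothetical `semantic_text` field
+named `content` in an index `my-index` (both names are placeholders) is as
+simple as this:
+
+[source,console]
+----
+GET my-index/_search
+{
+  "query": {
+    "semantic": {
+      "field": "content",
+      "query": "best surf spots in Portugal"
+    }
+  }
+}
+----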
-include::{es-ref-dir}/tab-widgets/semantic-search/hybrid-search-widget.asciidoc[] [discrete] [[semantic-search-read-more]] === Read more * Tutorials: -** <> +** <> +** <> +** <> using the model deployment workflow +** <> ** {ml-docs}/ml-nlp-text-emb-vector-search-example.html[Semantic search with the msmarco-MiniLM-L-12-v3 sentence-transformer model] +* Interactive examples: +** The https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] repo contains a number of interactive semantic search examples in the form of executable Python notebooks, using the {es} Python client +** https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb[Semantic search with ELSER using the model deployment workflow] +** https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/09-semantic-text.ipynb[Semantic search with `semantic_text`] * Blogs: +** https://www.elastic.co/search-labs/blog/semantic-search-simplified-semantic-text[{es} new semantic_text mapping: Simplifying semantic search] ** {blog-ref}may-2023-launch-sparse-encoder-ai-model[Introducing Elastic Learned Sparse Encoder: Elastic's AI model for semantic search] ** {blog-ref}lexical-ai-powered-search-elastic-vector-database[How to get the best of lexical and AI-powered search with Elastic's vector database] ** Information retrieval blog series: @@ -117,10 +101,10 @@ include::{es-ref-dir}/tab-widgets/semantic-search/hybrid-search-widget.asciidoc[ *** {blog-ref}improving-information-retrieval-elastic-stack-benchmarking-passage-retrieval[Part 2: Benchmarking passage retrieval] *** {blog-ref}may-2023-launch-information-retrieval-elasticsearch-ai-model[Part 3: Introducing Elastic Learned Sparse Encoder, our new retrieval model] *** {blog-ref}improving-information-retrieval-elastic-stack-hybrid[Part 4: Hybrid retrieval] -* Interactive examples: -** The https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] repo contains a number of interactive semantic search examples in the form of executable Python notebooks, using the {es} Python client -include::semantic-search-elser.asciidoc[] + include::semantic-search-semantic-text.asciidoc[] include::semantic-search-inference.asciidoc[] +include::semantic-search-elser.asciidoc[] include::cohere-es.asciidoc[] +include::semantic-search-deploy-model.asciidoc[] diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index c7e146a5442c..f6093494b6e5 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -108,7 +108,7 @@ include::skip-set-kernel-parameters.asciidoc[] ifeval::["{release-state}"=="unreleased"] -WARNING: Version {version} of Elasticsearch has not yet been released. +WARNING: Version {version} of Elasticsearch has not yet been released. The package might not be available. endif::[] diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index 60815d570ab3..085c48b0ce4b 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -101,7 +101,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -WARNING: Version {version} of Elasticsearch has not yet been released. +WARNING: Version {version} of Elasticsearch has not yet been released. The RPM might not be available. 
endif::[] diff --git a/docs/reference/setup/install/targz.asciidoc b/docs/reference/setup/install/targz.asciidoc index d40a4bfdd7e7..cab5be47a8c4 100644 --- a/docs/reference/setup/install/targz.asciidoc +++ b/docs/reference/setup/install/targz.asciidoc @@ -21,7 +21,7 @@ see the <> ifeval::["{release-state}"=="unreleased"] -WARNING: Version {version} of {es} has not yet been released. +WARNING: Version {version} of {es} has not yet been released. The archive might not be available. endif::[] @@ -44,7 +44,7 @@ cd elasticsearch-{version}/ <2> ifeval::["{release-state}"=="unreleased"] -WARNING: Version {version} of {es} has not yet been released. +WARNING: Version {version} of {es} has not yet been released. The archive might not be available. endif::[] diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index eb84ff149f8b..27a330debd81 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -31,7 +31,7 @@ see the <> ifeval::["{release-state}"=="unreleased"] -WARNING: Version {version} of {es} has not yet been released. +WARNING: Version {version} of {es} has not yet been released. The archive might not be available. endif::[] diff --git a/docs/reference/setup/stopping.asciidoc b/docs/reference/setup/stopping.asciidoc index 8c3a8d40fa1d..f80812f02693 100644 --- a/docs/reference/setup/stopping.asciidoc +++ b/docs/reference/setup/stopping.asciidoc @@ -50,9 +50,14 @@ such a shutdown, it does not go through an orderly shutdown as described above. process will also return with a special status code indicating the nature of the error. [horizontal] +Killed by jvmkiller agent:: 158 +User or kernel SIGTERM:: 143 +Slain by kernel oom-killer:: 137 +Segmentation fault:: 134 JVM internal error:: 128 Out of memory error:: 127 Stack overflow error:: 126 Unknown virtual machine error:: 125 Serious I/O error:: 124 +Bootstrap check failure:: 78 Unknown fatal error:: 1 diff --git a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc index 6cdf65ba54e7..b8bb6a2cd7d1 100644 --- a/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc +++ b/docs/reference/snapshot-restore/apis/snapshot-restore-apis.asciidoc @@ -28,6 +28,7 @@ For more information, see <>. include::put-repo-api.asciidoc[] include::verify-repo-api.asciidoc[] include::repo-analysis-api.asciidoc[] +include::verify-repo-integrity-api.asciidoc[] include::get-repo-api.asciidoc[] include::delete-repo-api.asciidoc[] include::clean-up-repo-api.asciidoc[] diff --git a/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc new file mode 100644 index 000000000000..99ae126b401f --- /dev/null +++ b/docs/reference/snapshot-restore/apis/verify-repo-integrity-api.asciidoc @@ -0,0 +1,232 @@ +[role="xpack"] +[[verify-repo-integrity-api]] +=== Verify repository integrity API +++++ +Verify repository integrity +++++ + +Verifies the integrity of the contents of a snapshot repository. 
+
+////
+[source,console]
+----
+PUT /_snapshot/my_repository
+{
+  "type": "fs",
+  "settings": {
+    "location": "my_backup_location"
+  }
+}
+----
+// TESTSETUP
+////
+
+[source,console]
+----
+POST /_snapshot/my_repository/_verify_integrity
+----
+
+[[verify-repo-integrity-api-request]]
+==== {api-request-title}
+
+`POST /_snapshot//_verify_integrity`
+
+[[verify-repo-integrity-api-prereqs]]
+==== {api-prereq-title}
+
+* If the {es} {security-features} are enabled, you must have the `manage`
+<> to use this API. For more
+information, see <>.
+
+[[verify-repo-integrity-api-desc]]
+==== {api-description-title}
+
+This API allows you to perform a comprehensive check of the contents of a
+repository, looking for any anomalies in its data or metadata which might
+prevent you from restoring snapshots from the repository or which might cause
+future snapshot create or delete operations to fail.
+
+If you suspect the integrity of the contents of one of your snapshot
+repositories, cease all write activity to this repository immediately, set its
+`read_only` option to `true`, and use this API to verify its integrity. Until
+you do so:
+
+* It may not be possible to <> from this repository.
+
+* <> may report errors when searched, or may have
+  unassigned shards.
+
+* <> into this repository may fail,
+  or may appear to succeed having created a snapshot which cannot be restored.
+
+* <> from this repository may fail, or
+  may appear to succeed leaving the underlying data on disk.
+
+* Continuing to write to the repository while it is in an invalid state may
+  cause additional damage to its contents.
+
+If the <> API finds any problems with the integrity
+of the contents of your repository, {es} will not be able to repair the damage.
+The only way to bring the repository back into a fully working state after its
+contents have been damaged is by restoring its contents from a
+<> which was taken before the
+damage occurred. You must also identify what caused the damage and take action
+to prevent it from happening again.
+
+If you cannot restore a repository backup,
+<> and use this for
+all future snapshot operations. In some cases it may be possible to recover
+some of the contents of a damaged repository, either by
+<> as many of its snapshots as needed and
+<> of the restored data, or by
+using the <> API to copy data from any <>
+mounted from the damaged repository.
+
+Avoid all operations which write to the repository while the
+<> API is running. If something changes the
+repository contents while an integrity verification is running then {es} may
+incorrectly report having detected some anomalies in its contents due to the
+concurrent writes. It may also incorrectly fail to report some anomalies that
+the concurrent writes prevented it from detecting.
+
+NOTE: This API is intended for exploratory use by humans. You should expect the
+request parameters and the response format to vary in future versions.
+
+NOTE: This API may not work correctly in a mixed-version cluster.
+
+[[verify-repo-integrity-api-path-params]]
+==== {api-path-parms-title}
+
+``::
+(Required, string)
+Name of the snapshot repository whose integrity to verify.
+
+[[verify-repo-integrity-api-query-params]]
+==== {api-query-parms-title}
+
+The default values for the parameters of this API are designed to limit the
+impact of the integrity verification on other activities in your cluster.
For +instance, by default it will only use at most half of the `snapshot_meta` +threads to verify the integrity of each snapshot, allowing other snapshot +operations to use the other half of this thread pool. + +If you modify these parameters to speed up the verification process, you risk +disrupting other snapshot-related operations in your cluster. For large +repositories, consider setting up a separate single-node {es} cluster just for +running the integrity verification API. + +`snapshot_verification_concurrency`:: +(Optional, integer) Specifies the number of snapshots to verify concurrently. +Defaults to `0` which means to use at most half of the `snapshot_meta` thread +pool at once. + +`index_verification_concurrency`:: +(Optional, integer) Specifies the number of indices to verify concurrently. +Defaults to `0` which means to use the entire `snapshot_meta` thread pool. + +`meta_thread_pool_concurrency`:: +(Optional, integer) Specifies the maximum number of snapshot metadata +operations to execute concurrently. Defaults to `0` which means to use at most +half of the `snapshot_meta` thread pool at once. + +`index_snapshot_verification_concurrency`:: +(Optional, integer) Specifies the maximum number of index snapshots to verify +concurrently within each index verification. Defaults to `1`. + +`max_failed_shard_snapshots`:: +(Optional, integer) Limits the number of shard snapshot failures to track +during integrity verification, in order to avoid excessive resource usage. If +your repository contains more than this number of shard snapshot failures then +the verification will fail. Defaults to `10000`. + +`verify_blob_contents`:: +(Optional, boolean) Specifies whether to verify the checksum of every data blob +in the repository. Defaults to `false`. If this feature is enabled, {es} will +read the entire repository contents, which may be extremely slow and expensive. + +`blob_thread_pool_concurrency`:: +(Optional, integer) If `?verify_blob_contents` is `true`, this parameter +specifies how many blobs to verify at once. Defaults to `1`. + +`max_bytes_per_sec`:: +(Optional, <>) +If `?verify_blob_contents` is `true`, this parameter specifies the maximum +amount of data that {es} will read from the repository every second. Defaults +to `10mb`. + +[role="child_attributes"] +[[verify-repo-integrity-api-response-body]] +==== {api-response-body-title} + +The response exposes implementation details of the analysis which may change +from version to version. The response body format is therefore not considered +stable and may be different in newer versions. + +`log`:: +(array) A sequence of objects that report the progress of the analysis. ++ +.Properties of `log` +[%collapsible%open] +==== +`timestamp_in_millis`:: +(integer) The timestamp of this log entry, represented as the number of +milliseconds since the {wikipedia}/Unix_time[Unix epoch]. + +`timestamp`:: +(string) The timestamp of this log entry, represented as a string formatted +according to {wikipedia}/ISO_8601[ISO 8601]. Only included if the +<> flag is set. + +`snapshot`:: +(object) If the log entry pertains to a particular snapshot then the snapshot +will be described in this object. + +`index`:: +(object) If the log entry pertains to a particular index then the index will be +described in this object. + +`snapshot_restorability`:: +(object) If the log entry pertains to the restorability of an index then the +details will be described in this object. 
+ +`anomaly`:: +(string) If the log entry pertains to an anomaly in the repository contents then +this string will describe the anomaly. + +`exception`:: +(object) If the log entry pertains to an exception that {es} encountered during +the verification then the details will be included in this object. + +==== + +`results`:: +(object) An object which describes the final results of the analysis. ++ +.Properties of `results` +[%collapsible%open] +==== +`status`:: +(object) The final status of the analysis task. + +`final_repository_generation`:: +(integer) The repository generation at the end of the analysis. If there were +any writes to the repository during the analysis then this value will be +different from the `generation` reported in the task status, and the analysis +may have detected spurious anomalies due to the concurrent writes, or may even +have failed to detect some anomalies in the repository contents. + +`total_anomalies`:: +(integer) The total number of anomalies detected during the analysis. + +`result`:: +(string) The final result of the analysis. If the repository contents appear to +be intact then this will be the string `pass`. If this field is missing, or +contains some other value, then the repository contents were not fully +verified. + +==== + +`exception`:: +(object) If the analysis encountered an exception which prevented it from +completing successfully then this exception will be reported here. diff --git a/docs/reference/snapshot-restore/register-repository.asciidoc b/docs/reference/snapshot-restore/register-repository.asciidoc index 28b0640a8fae..2147ad3c684f 100644 --- a/docs/reference/snapshot-restore/register-repository.asciidoc +++ b/docs/reference/snapshot-restore/register-repository.asciidoc @@ -272,7 +272,9 @@ filesystem snapshot of this repository. When restoring a repository from a backup, you must not register the repository with {es} until the repository contents are fully restored. If you alter the contents of a repository while it is registered with {es} then the repository -may become unreadable or may silently lose some of its contents. +may become unreadable or may silently lose some of its contents. After +restoring a repository from a backup, use the <> API +to verify its integrity before you start to use the repository. 
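+
+For instance (a sketch; the repository name and parameter values are
+illustrative), a verification that also checks blob contents at a modest read
+rate might look like this:
+
+[source,console]
+----
+POST /_snapshot/my_repository/_verify_integrity?verify_blob_contents=true&max_bytes_per_sec=25mb
+----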
include::repository-azure.asciidoc[] include::repository-gcs.asciidoc[] diff --git a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java index 74acb00925e5..e0c18f35f6cb 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java +++ b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java @@ -17,13 +17,16 @@ */ public enum RestApiVersion { + V_9(9), + V_8(8), - @UpdateForV9 // v9 will not need to support the v7 REST API V_7(7); public final byte major; + @UpdateForV9 + // We need to bump current and previous to V_9 and V_8, respectively private static final RestApiVersion CURRENT = V_8; private static final RestApiVersion PREVIOUS = V_7; @@ -49,6 +52,7 @@ public static RestApiVersion minimumSupported() { public static Predicate equalTo(RestApiVersion restApiVersion) { return switch (restApiVersion) { + case V_9 -> r -> r.major == V_9.major; case V_8 -> r -> r.major == V_8.major; case V_7 -> r -> r.major == V_7.major; }; @@ -56,11 +60,14 @@ public static Predicate equalTo(RestApiVersion restApiVersion) { public static Predicate onOrAfter(RestApiVersion restApiVersion) { return switch (restApiVersion) { + case V_9 -> r -> r.major >= V_9.major; case V_8 -> r -> r.major >= V_8.major; case V_7 -> r -> r.major >= V_7.major; }; } + @UpdateForV9 + // Right now we return api version 8 for major version 9 until we bump the api version above public static RestApiVersion forMajor(int major) { switch (major) { case 7 -> { @@ -69,6 +76,9 @@ public static RestApiVersion forMajor(int major) { case 8 -> { return V_8; } + case 9 -> { + return V_8; + } default -> throw new IllegalArgumentException("Unknown REST API version " + major); } } diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java index c59f003d9cb0..63191084ca83 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java @@ -111,7 +111,7 @@ public String text() throws IOException { } private void throwOnNoText() { - throw new IllegalStateException("Can't get text on a " + currentToken() + " at " + getTokenLocation()); + throw new IllegalArgumentException("Expected text at " + getTokenLocation() + " but found " + currentToken()); } @Override diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle index 91f3303d9d4a..5e233f423aa1 100644 --- a/modules/aggregations/build.gradle +++ b/modules/aggregations/build.gradle @@ -36,30 +36,6 @@ if (BuildParams.isSnapshotBuild() == false) { } } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("search.aggregation/20_terms/string profiler via global ordinals filters implementation", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/20_terms/string profiler via global ordinals native implementation", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/20_terms/string profiler via map", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/20_terms/numeric profiler", "The profiler results aren't backwards compatible.") - task.skipTest("search.aggregation/210_top_hits_nested_metric/top_hits aggregation with sequence 
numbers", "#42809 the use nested path and filter sort throws an exception") - task.skipTest("search.aggregation/370_doc_count_field/Test filters agg with doc_count", "Uses profiler for assertions which is not backwards compatible") - - // In 8.9.0, the default t-digest algorithm changed from AVL-tree-based to hybrid, combining a sorted array of samples with a merging - // implementation. This change leads to slight different percentile results, compared to previous versions. - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Basic test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Non-keyed test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Only aggs test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Explicit Percents test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Metadata test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/180_percentiles_tdigest_metric/Filtered test", "Hybrid t-digest produces different results.") - task.skipTest("search.aggregation/420_percentile_ranks_tdigest_metric/filtered", "Hybrid t-digest produces different results.") - - // Something has changed with response codes - task.skipTest("search.aggregation/20_terms/IP test", "Hybrid t-digest produces different results.") - - task.addAllowedWarningRegex("\\[types removal\\].*") -} - artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 1fc42a1b294f..b43124f52552 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -29,13 +29,6 @@ dependencies { clusterModules project(':modules:mapper-extras') } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("indices.analyze/10_analyze/htmlStrip_deprecated", "Cleanup versioned deprecations in analysis #41560") - task.skipTest("analysis-common/40_token_filters/delimited_payload_filter_error", "Remove preconfigured delimited_payload_filter #43686") - task.skipTest("analysis-common/20_analyzers/standard_html_strip", "Cleanup versioned deprecations in analysis #41560") - task.skipTest("search.query/50_queries_with_synonyms/Test common terms query with stacked tokens", "#42654 - `common` query throws an exception") -} - artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java index c18cb3dddf0a..4c8e88a0cedb 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -52,25 +52,6 @@ public void testNGramFilterInCustomAnalyzerDeprecationError() throws IOException ex.getMessage() ); } - - final Settings settingsPre7 = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put( - IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, 
IndexVersions.V_7_6_0) - ) - .put("index.analysis.analyzer.custom_analyzer.type", "custom") - .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") - .put("index.analysis.filter.my_ngram.type", "nGram") - .build(); - try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); - assertWarnings( - "The [nGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [ngram] instead." - ); - } } /** @@ -101,26 +82,6 @@ public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOExcep ex.getMessage() ); } - - final Settings settingsPre7 = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put( - IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_6_0) - ) - .put("index.analysis.analyzer.custom_analyzer.type", "custom") - .put("index.analysis.analyzer.custom_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.custom_analyzer.filter", "my_ngram") - .put("index.analysis.filter.my_ngram.type", "edgeNGram") - .build(); - - try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { - createTestAnalysis(IndexSettingsModule.newIndexSettings("index", settingsPre7), settingsPre7, commonAnalysisPlugin); - assertWarnings( - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [edge_ngram] instead." - ); - } } /** @@ -128,39 +89,6 @@ public void testEdgeNGramFilterInCustomAnalyzerDeprecationError() throws IOExcep * disallow usages for indices created after 8.0 */ public void testNGramTokenizerDeprecation() throws IOException { - // tests for prebuilt tokenizer - doTestPrebuiltTokenizerDeprecation( - "nGram", - "ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestPrebuiltTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestPrebuiltTokenizerDeprecation( - "nGram", - "ngram", - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - true - ); - doTestPrebuiltTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - true - ); expectThrows( IllegalArgumentException.class, () -> doTestPrebuiltTokenizerDeprecation( @@ -179,40 +107,6 @@ public void testNGramTokenizerDeprecation() throws IOException { true ) ); - - // same batch of tests for custom tokenizer definition in the settings - doTestCustomTokenizerDeprecation( - "nGram", - "ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestCustomTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersions.V_7_5_2), - false - ); - doTestCustomTokenizerDeprecation( - "nGram", - "ngram", - 
IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - true - ); - doTestCustomTokenizerDeprecation( - "edgeNGram", - "edge_ngram", - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_6_0, - IndexVersion.max(IndexVersions.V_7_6_0, IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0)) - ), - true - ); expectThrows( IllegalArgumentException.class, () -> doTestCustomTokenizerDeprecation( diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java index 412e3ba3e380..48bc60b5ad0b 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerTests.java @@ -17,14 +17,12 @@ import org.elasticsearch.index.IndexService.IndexCreationContext; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.io.StringReader; @@ -47,61 +45,10 @@ private IndexAnalyzers buildAnalyzers(IndexVersion version, String tokenizer) th } public void testPreConfiguredTokenizer() throws IOException { - - // Before 7.3 we return ngrams of length 1 only - { - IndexVersion version = IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0) - ); - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t" }); - } - } - - // Check deprecated name as well - { - IndexVersion version = IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0) - ); - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edgeNGram")) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t" }); - } - } - - // Afterwards, we return ngrams of length 1 and 2, to match the default factory settings - { - try (IndexAnalyzers indexAnalyzers = buildAnalyzers(IndexVersion.current(), "edge_ngram")) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); - } - } - - // Check deprecated name as well, needs version before 8.0 because throws IAE after that - { - try ( - IndexAnalyzers indexAnalyzers = buildAnalyzers( - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_3_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_8_0_0) - ), - "edgeNGram" - ) - ) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - 
assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); - - } + try (IndexAnalyzers indexAnalyzers = buildAnalyzers(IndexVersion.current(), "edge_ngram")) { + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "test", new String[] { "t", "te" }); } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index 7a2bd2a82298..16288c754e92 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -337,7 +337,7 @@ public void testShingleFilters() { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .put("index.analysis.filter.synonyms.type", "synonym") @@ -391,7 +391,7 @@ public void testPreconfiguredTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); @@ -423,7 +423,7 @@ public void testDisallowedTokenFilters() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .putList("common_words", "a", "b") diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index 68e6d6661f94..39fda0636303 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.index.IndexService.IndexCreationContext; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalysisTestsHelper; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -25,7 +24,6 @@ import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.io.StringReader; @@ -180,61 +178,26 @@ public void testIgnoreKeywords() throws IOException { } public void testPreconfiguredFilter() throws IOException { - // Before 7.3 we don't adjust offsets - { - Settings settings = 
Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - Settings indexSettings = Settings.builder() - .put( - IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - IndexVersionUtils.getPreviousVersion(IndexVersions.V_7_3_0) - ) - ) - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - - try ( - IndexAnalyzers indexAnalyzers = new AnalysisModule( - TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()), - new StablePluginsRegistry() - ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings) - ) { - - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 0 }, new int[] { 4, 4 }); - - } - } - - // Afger 7.3 we do adjust offsets - { - Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); - Settings indexSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) - .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") - .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") - .build(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - try ( - IndexAnalyzers indexAnalyzers = new AnalysisModule( - TestEnvironment.newEnvironment(settings), - Collections.singletonList(new CommonAnalysisPlugin()), - new StablePluginsRegistry() - ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings) - ) { + try ( + IndexAnalyzers indexAnalyzers = new AnalysisModule( + TestEnvironment.newEnvironment(settings), + Collections.singletonList(new CommonAnalysisPlugin()), + new StablePluginsRegistry() + ).getAnalysisRegistry().build(IndexCreationContext.CREATE_INDEX, idxSettings) + ) { - NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); - assertNotNull(analyzer); - assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 1 }, new int[] { 1, 4 }); + NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer"); + assertNotNull(analyzer); + assertAnalyzesTo(analyzer, "h100", new String[] { "h", "100" }, new int[] { 0, 1 }, new int[] { 1, 4 }); - } } } } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index 802e599b89f1..71c26372dac5 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml 
@@ -317,22 +317,24 @@ body: text: "a/b/c" explain: true - tokenizer: - type: PathHierarchy + tokenizer: path_hierarchy - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: __anonymous__PathHierarchy } + - match: { detail.tokenizer.name: path_hierarchy } - match: { detail.tokenizer.tokens.0.token: a } - match: { detail.tokenizer.tokens.1.token: a/b } - match: { detail.tokenizer.tokens.2.token: a/b/c } +--- +"PathHierarchy": - do: indices.analyze: body: text: "a/b/c" explain: true - tokenizer: path_hierarchy + tokenizer: + type: PathHierarchy - length: { detail.tokenizer.tokens: 3 } - - match: { detail.tokenizer.name: path_hierarchy } + - match: { detail.tokenizer.name: __anonymous__PathHierarchy } - match: { detail.tokenizer.tokens.0.token: a } - match: { detail.tokenizer.tokens.1.token: a/b } - match: { detail.tokenizer.tokens.2.token: a/b/c } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index e3da69b7b2f0..ebe5546c0907 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -597,8 +597,8 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { false ); verifyResolvability(dataStreamName, indicesAdmin().prepareGetSettings(dataStreamName), false); - verifyResolvability(dataStreamName, clusterAdmin().prepareHealth(dataStreamName), false); - verifyResolvability(dataStreamName, clusterAdmin().prepareState().setIndices(dataStreamName), false); + verifyResolvability(dataStreamName, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, dataStreamName), false); + verifyResolvability(dataStreamName, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(dataStreamName), false); verifyResolvability(dataStreamName, client().prepareFieldCaps(dataStreamName).setFields("*"), false); verifyResolvability(dataStreamName, indicesAdmin().prepareGetIndex().addIndices(dataStreamName), false); verifyResolvability(dataStreamName, indicesAdmin().prepareOpen(dataStreamName), false); @@ -644,8 +644,8 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { indicesAdmin().prepareUpdateSettings(wildcardExpression).setSettings(Settings.builder().put("index.number_of_replicas", 0)), false ); - verifyResolvability(wildcardExpression, clusterAdmin().prepareHealth(wildcardExpression), false); - verifyResolvability(wildcardExpression, clusterAdmin().prepareState().setIndices(wildcardExpression), false); + verifyResolvability(wildcardExpression, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, wildcardExpression), false); + verifyResolvability(wildcardExpression, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices(wildcardExpression), false); verifyResolvability(wildcardExpression, client().prepareFieldCaps(wildcardExpression).setFields("*"), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareGetIndex().addIndices(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false); @@ -1594,7 +1594,7 @@ public void testClusterStateIncludeDataStream() throws Exception { client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); // when querying a backing index then the data stream should be included as well. 
- ClusterStateRequest request = new ClusterStateRequest().indices(".ds-metrics-foo-*000001"); + ClusterStateRequest request = new ClusterStateRequest(TEST_REQUEST_TIMEOUT).indices(".ds-metrics-foo-*000001"); ClusterState state = clusterAdmin().state(request).get().getState(); assertThat(state.metadata().dataStreams().size(), equalTo(1)); assertThat(state.metadata().dataStreams().get("metrics-foo").getName(), equalTo("metrics-foo")); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java index a52016e8c7f0..66bb06ca4240 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/IngestFailureStoreMetricsIT.java @@ -19,14 +19,11 @@ import org.elasticsearch.action.bulk.FailureStoreMetrics; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Template; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.core.Strings; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -237,7 +234,7 @@ public void testRerouteSuccessfulCorrectName() throws IOException { createDataStream(); String destination = dataStream + "-destination"; - final var createDataStreamRequest = new CreateDataStreamAction.Request(destination); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, destination); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); createReroutePipeline(destination); @@ -306,7 +303,7 @@ private void putComposableIndexTemplate(boolean failureStore) throws IOException } private void createDataStream() { - final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); + final var createDataStreamRequest = new CreateDataStreamAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataStream); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); } @@ -319,9 +316,7 @@ private void createReroutePipeline(String destination) { } private void createPipeline(String processor) { - String pipelineDefinition = Strings.format("{\"processors\": [{%s}]}", processor); - BytesReference bytes = new BytesArray(pipelineDefinition); - clusterAdmin().putPipeline(new PutPipelineRequest(pipeline, bytes, XContentType.JSON)).actionGet(); + putJsonPipeline(pipeline, Strings.format("{\"processors\": [{%s}]}", processor)); } private void indexDocs(String dataStream, int numDocs, String pipeline) { diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java index ef785086a0ef..7fdc3b660433 100644 --- 
a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/ResolveClusterDataStreamIT.java @@ -405,7 +405,7 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept assertFalse( client(REMOTE_CLUSTER_2).admin() .cluster() - .prepareHealth(remoteIndex2) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex2) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index ee17521ad757..65f911d27bf6 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -917,7 +917,7 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception String firstGenerationIndex = getBackingIndices(dataStreamName).get(0); ClusterGetSettingsAction.Response response = client().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).get(); Settings clusterSettings = response.persistentSettings(); @@ -1093,7 +1093,7 @@ public void testLifecycleAppliedToFailureStore() throws Exception { // Let's verify the merge settings ClusterGetSettingsAction.Response response = client().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).get(); Settings clusterSettings = response.persistentSettings(); diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java index d3a2867fe2ec..f0279702812c 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/LogsIndexModeCustomSettingsIT.java @@ -283,35 +283,6 @@ public void testOverrideIgnoreDynamicBeyondLimit() throws IOException { assertThat(ignoreDynamicBeyondLimitIndexSetting, equalTo("false")); } - public void testAddNonCompatibleMapping() throws IOException { - var nonCompatibleMappingAdditionTemplate = """ - { - "template": { - "mappings": { - "properties": { - "bomb": { - "type": "ip", - "doc_values": false - } - } - } - } - }"""; - - Exception e = assertThrows( - ResponseException.class, - () -> putComponentTemplate(client, "logs@custom", nonCompatibleMappingAdditionTemplate) - ); - assertThat( - e.getMessage(), - containsString("updating component template [logs@custom] results in invalid composable template [logs]") - ); - assertThat( - e.getMessage(), - containsString("field [bomb] of type [ip] doesn't support synthetic source because it doesn't have doc values") - ); - } - private static Map getMapping(final RestClient client, final String indexName) throws IOException { final Request request = new Request("GET", "/" + indexName + "/_mapping"); diff --git 
a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java index ad4302cb04b4..404914cda6a7 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/logsdb/qa/StandardVersusLogsIndexModeRandomDataChallengeRestIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.logsdb.datageneration.DataGenerator; import org.elasticsearch.logsdb.datageneration.DataGeneratorSpecification; import org.elasticsearch.logsdb.datageneration.FieldDataGenerator; -import org.elasticsearch.logsdb.datageneration.FieldType; import org.elasticsearch.logsdb.datageneration.datasource.DataSourceHandler; import org.elasticsearch.logsdb.datageneration.datasource.DataSourceRequest; import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse; @@ -78,7 +77,18 @@ public DataSourceResponse.ObjectMappingParametersGenerator handle(DataSourceRequ })) .withPredefinedFields( List.of( - new PredefinedField.WithType("host.name", FieldType.KEYWORD), + // Customized because it always needs doc_values for aggregations. + new PredefinedField.WithGenerator("host.name", new FieldDataGenerator() { + @Override + public CheckedConsumer<XContentBuilder, IOException> mappingWriter() { + return b -> b.startObject().field("type", "keyword").endObject(); + } + + @Override + public CheckedConsumer<XContentBuilder, IOException> fieldValueGenerator() { + return b -> b.value(randomAlphaOfLength(5)); + } + }), // Needed for terms query new PredefinedField.WithGenerator("method", new FieldDataGenerator() { + @Override diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 99d4f8bb7cd2..d8f8ae9d080a 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -561,7 +561,13 @@ private Set waitForInProgressOrTriggerDownsampling( * Issues a request to downsample the source index to the downsample index for the specified round. */ private void downsampleIndexOnce(DataStreamLifecycle.Downsampling.Round round, String sourceIndex, String downsampleIndexName) { - DownsampleAction.Request request = new DownsampleAction.Request(sourceIndex, downsampleIndexName, null, round.config()); + DownsampleAction.Request request = new DownsampleAction.Request( + TimeValue.THIRTY_SECONDS /* TODO should this be longer/configurable?
*/, + sourceIndex, + downsampleIndexName, + null, + round.config() + ); transportActionsDeduplicator.executeOnce( request, new ErrorRecordingActionListener( diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java index c3fd47961631..29cda588bc26 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestGetDataStreamsAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestUtils; @@ -61,17 +62,19 @@ public Set supportedCapabilities() { @Override public Set supportedQueryParameters() { - return Set.of( - "name", - "include_defaults", - "timeout", - "master_timeout", - RestRequest.PATH_RESTRICTED, - IndicesOptions.WildcardOptions.EXPAND_WILDCARDS, - IndicesOptions.ConcreteTargetOptions.IGNORE_UNAVAILABLE, - IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, - IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, - "verbose" + return Sets.union( + RestRequest.INTERNAL_MARKER_REQUEST_PARAMETERS, + Set.of( + "name", + "include_defaults", + "timeout", + "master_timeout", + IndicesOptions.WildcardOptions.EXPAND_WILDCARDS, + IndicesOptions.ConcreteTargetOptions.IGNORE_UNAVAILABLE, + IndicesOptions.WildcardOptions.ALLOW_NO_INDICES, + IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED, + "verbose" + ) ); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java index bc313d145c17..b2ddab164b31 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamsStatsTests.java @@ -131,7 +131,10 @@ public void testStatsClosedBackingIndexDataStream() throws Exception { assertTrue(indicesAdmin().close(new CloseIndexRequest(".ds-" + dataStreamName + "-*-000001")).actionGet().isAcknowledged()); assertBusy( - () -> assertNotEquals(ClusterHealthStatus.RED, clusterAdmin().health(new ClusterHealthRequest()).actionGet().getStatus()) + () -> assertNotEquals( + ClusterHealthStatus.RED, + clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT)).actionGet().getStatus() + ) ); DataStreamsStatsAction.Response stats = getDataStreamsStats(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java index a61258726246..e3d5ad0d63e8 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java @@ -118,7 +118,9 @@ public void testLookAheadTimeSettingHigherThanTimeSeriesPollIntervalSetting() { } private void updateClusterSettings(Settings settings) { - clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(settings)).actionGet(); + clusterAdmin().updateSettings( + new 
ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings(settings) + ).actionGet(); } private void updateIndexSettings(Settings settings) { diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index 991504b27f65..af3204ed443a 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -748,3 +748,43 @@ teardown: indices.delete: index: .fs-logs-foobar-* - is_true: acknowledged + +--- +"Version conflicts are not redirected to failure store": + - requires: + cluster_features: ["gte_v8.16.0"] + reason: "Redirecting version conflicts to the failure store is considered a bug fixed in 8.16" + test_runner_features: [allowed_warnings, contains] + + - do: + allowed_warnings: + - "index template [generic_logs_template] has index patterns [logs-*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [generic_logs_template] will take precedence during new index creation" + indices.put_index_template: + name: generic_logs_template + body: + index_patterns: logs-* + data_stream: + failure_store: true + template: + settings: + number_of_shards: 1 + number_of_replicas: 1 + mappings: + properties: + '@timestamp': + type: date + count: + type: long + + - do: + bulk: + refresh: true + body: + - '{ "create": { "_index": "logs-foobar", "_id": "1" } }' + - '{ "@timestamp": "2022-01-01", "baz": "quick", "a": "brown", "b": "fox" }' + - '{ "create": { "_index": "logs-foobar", "_id": "1" } }' + - '{ "@timestamp": "2022-01-01", "baz": "lazy", "a": "dog" }' + - is_true: errors + - match: { items.1.create._index: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.1.create.status: 409 } + - match: { items.1.create.error.type: version_conflict_engine_exception} diff --git a/modules/health-shards-availability/build.gradle b/modules/health-shards-availability/build.gradle index 6c7cf5a19c8a..b98824d84af9 100644 --- a/modules/health-shards-availability/build.gradle +++ b/modules/health-shards-availability/build.gradle @@ -19,7 +19,3 @@ restResources { include '_common', 'indices', 'index', 'cluster', 'nodes', 'get', 'ingest' } } - -tasks.named("yamlRestTestV7CompatTransform").configure {task -> - task.addAllowedWarningRegex("setting \\[ecs\\] is deprecated as ECS format is the default and only option") -} diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle index 89f0b530713c..f708448c10d7 100644 --- a/modules/ingest-attachment/build.gradle +++ b/modules/ingest-attachment/build.gradle @@ -138,14 +138,6 @@ tasks.named("forbiddenPatterns").configure { exclude '**/text-cjk-*.txt' } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - // 2 new tika metadata fields are returned in v8 - task.replaceValueInLength("_source.attachment", 8, "Test ingest attachment processor with .doc file") - task.replaceValueInLength("_source.attachment", 8, "Test ingest attachment processor with .docx file") - // Tika 2.4.0 adds an extra newline for each embedded attachment, making the content_length larger - task.replaceValueInMatch("_source.attachment.content_length", 20, "Test ingest attachment processor with .docx 
file") -} - tasks.named("thirdPartyAudit").configure { ignoreMissingClasses() } @@ -153,5 +145,5 @@ tasks.named("thirdPartyAudit").configure { if (BuildParams.inFipsJvm) { tasks.named("test").configure { enabled = false } tasks.named("yamlRestTest").configure { enabled = false }; - tasks.named("yamlRestTestV7CompatTest").configure { enabled = false }; + tasks.named("yamlRestCompatTest").configure { enabled = false }; } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index d7100745680b..ee923132aa6a 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -49,7 +49,3 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.commons.logging.LogFactory', ) } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.addAllowedWarningRegex("\\[types removal\\].*") -} diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java index f1c592e6e834..4a0a55dce948 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -15,14 +15,11 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Strings; @@ -79,7 +76,7 @@ public void testFailureInConditionalProcessor() { internalCluster().ensureAtLeastNumDataNodes(1); internalCluster().startMasterOnlyNode(); final String pipelineId = "foo"; - clusterAdmin().preparePutPipeline(pipelineId, new BytesArray(Strings.format(""" + putJsonPipeline(pipelineId, Strings.format(""" { "processors": [ { @@ -99,7 +96,7 @@ public void testFailureInConditionalProcessor() { } } ] - }""", MockScriptEngine.NAME)), XContentType.JSON).get(); + }""", MockScriptEngine.NAME)); Exception e = expectThrows( Exception.class, @@ -126,22 +123,16 @@ public void testScriptDisabled() throws Exception { String pipelineIdWithScript = pipelineIdWithoutScript + "_script"; internalCluster().startNode(); - BytesReference pipelineWithScript = new BytesArray(Strings.format(""" + putJsonPipeline(pipelineIdWithScript, Strings.format(""" { "processors": [ { "script": { "lang": "%s", "source": "my_script" } } ] }""", MockScriptEngine.NAME)); - BytesReference pipelineWithoutScript = new BytesArray(""" + putJsonPipeline(pipelineIdWithoutScript, """ { "processors": [ { "set": { "field": "y", "value": 0 } } ] }"""); - Consumer checkPipelineExists = (id) -> assertThat( - clusterAdmin().prepareGetPipeline(id).get().pipelines().get(0).getId(), - equalTo(id) - ); - - clusterAdmin().preparePutPipeline(pipelineIdWithScript, pipelineWithScript, XContentType.JSON).get(); - 
clusterAdmin().preparePutPipeline(pipelineIdWithoutScript, pipelineWithoutScript, XContentType.JSON).get(); + Consumer checkPipelineExists = (id) -> assertThat(getPipelines(id).pipelines().get(0).getId(), equalTo(id)); checkPipelineExists.accept(pipelineIdWithScript); checkPipelineExists.accept(pipelineIdWithoutScript); @@ -197,14 +188,13 @@ public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exceptio putJsonStoredScript("1", Strings.format(""" {"script": {"lang": "%s", "source": "my_script"} } """, MockScriptEngine.NAME)); - BytesReference pipeline = new BytesArray(""" + putJsonPipeline("_id", """ { "processors" : [ {"set" : {"field": "y", "value": 0}}, {"script" : {"id": "1"}} ] }"""); - clusterAdmin().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); prepareIndex("index").setId("1").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -232,13 +222,12 @@ public void testWithDedicatedIngestNode() throws Exception { String node = internalCluster().startNode(); String ingestNode = internalCluster().startNode(onlyRole(DiscoveryNodeRole.INGEST_ROLE)); - BytesReference pipeline = new BytesArray(""" + putJsonPipeline("_id", """ { "processors" : [ {"set" : {"field": "y", "value": 0}} ] }"""); - clusterAdmin().preparePutPipeline("_id", pipeline, XContentType.JSON).get(); prepareIndex("index").setId("1").setSource("x", 0).setPipeline("_id").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -264,7 +253,7 @@ public void testWithDedicatedIngestNode() throws Exception { public void testDefaultPipelineWaitForClusterStateRecovered() throws Exception { internalCluster().startNode(); - final var pipeline = new BytesArray(""" + putJsonPipeline("test_pipeline", """ { "processors" : [ { @@ -275,8 +264,8 @@ public void testDefaultPipelineWaitForClusterStateRecovered() throws Exception { } ] }"""); + final TimeValue timeout = TimeValue.timeValueSeconds(10); - client().admin().cluster().preparePutPipeline("test_pipeline", pipeline, XContentType.JSON).get(timeout); client().admin().indices().preparePutTemplate("pipeline_template").setPatterns(Collections.singletonList("*")).setSettings(""" { "index" : { @@ -357,16 +346,13 @@ public void testForwardBulkWithSystemWritePoolDisabled() throws Exception { // Create Bulk Request createIndex("index"); - BytesReference source = new BytesArray(""" + putJsonPipeline("_id", """ { "processors" : [ {"set" : {"field": "y", "value": 0}} ] }"""); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - int numRequests = scaledRandomIntBetween(32, 128); BulkRequest bulkRequest = new BulkRequest(); BulkResponse response; diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/ManyNestedPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/ManyNestedPipelinesIT.java index c9f3f023b43e..2c9ea27805a1 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/ManyNestedPipelinesIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/ManyNestedPipelinesIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult; import org.elasticsearch.action.ingest.SimulatePipelineResponse; import org.elasticsearch.action.ingest.SimulateProcessorResult; -import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.Strings; import org.elasticsearch.ingest.GraphStructureException; @@ -166,7 +165,7 @@ private void createChainedPipelines(String prefix, int count) { private void createChainedPipeline(String prefix, int number) { String pipelineId = prefix + "pipeline_" + number; String nextPipelineId = prefix + "pipeline_" + (number + 1); - String pipelineTemplate = """ + putJsonPipeline(pipelineId, Strings.format(""" { "processors": [ { @@ -176,9 +175,7 @@ private void createChainedPipeline(String prefix, int number) { } ] } - """; - String pipeline = Strings.format(pipelineTemplate, nextPipelineId); - clusterAdmin().preparePutPipeline(pipelineId, new BytesArray(pipeline), XContentType.JSON).get(); + """, nextPipelineId)); } private void createLastPipeline(String prefix, int number) { @@ -195,6 +192,6 @@ private void createLastPipeline(String prefix, int number) { ] } """; - clusterAdmin().preparePutPipeline(pipelineId, new BytesArray(pipeline), XContentType.JSON).get(); + putJsonPipeline(pipelineId, pipeline); } } diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java index 7f0910ea5cc4..0b93609b3156 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/plugins/internal/XContentMeteringParserDecoratorWithPipelinesIT.java @@ -10,9 +10,6 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.ingest.common.IngestCommonPlugin; @@ -21,7 +18,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.FilterXContentParserWrapper; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.Collection; @@ -44,7 +40,7 @@ public class XContentMeteringParserDecoratorWithPipelinesIT extends ESIntegTestC public void testDocumentIsReportedWithPipelines() throws Exception { hasWrappedParser = false; // pipeline adding fields, changing destination is not affecting reporting - final BytesReference pipelineBody = new BytesArray(""" + putJsonPipeline("pipeline", """ { "processors": [ { @@ -62,7 +58,6 @@ public void testDocumentIsReportedWithPipelines() throws Exception { ] } """); - clusterAdmin().putPipeline(new PutPipelineRequest("pipeline", pipelineBody, XContentType.JSON)).actionGet(); client().index( new IndexRequest(TEST_INDEX_NAME).setPipeline("pipeline") diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index bc5bb165cd0d..64a679581f76 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -84,11 +84,6 @@ tasks.named("dependencyLicenses").configure { ignoreFile 'elastic-geoip-database-service-agreement-LICENSE.txt' } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - 
task.skipTestsByFilePattern("**/ingest_geoip/20_geoip_processor.yml", "from 8.0 yaml rest tests use geoip test fixture and default geoip are no longer packaged. In 7.x yaml tests used default databases which makes tests results very different, so skipping these tests") - // task.skipTest("lang_mustache/50_multi_search_template/Multi-search template with errors", "xxx") -} - artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } diff --git a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle index a97664923438..e930b4ca3823 100644 --- a/modules/ingest-geoip/qa/full-cluster-restart/build.gradle +++ b/modules/ingest-geoip/qa/full-cluster-restart/build.gradle @@ -20,12 +20,10 @@ dependencies { javaRestTestImplementation(testArtifact(project(":qa:full-cluster-restart"), "javaRestTest")) } -assert Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() == 8 : - "If we are targeting a branch other than 8, we should enable migration tests" // once we are ready to test migrations from 8.x to 9.x, we can set the compatible version to 8.0.0 // see https://github.com/elastic/elasticsearch/pull/93666 -BuildParams.bwcVersions.withWireCompatible(v -> v.before("7.0.0")) { bwcVersion, baseName -> +BuildParams.bwcVersions.withWireCompatible(v -> v.before("9.0.0")) { bwcVersion, baseName -> tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { usesBwcDistribution(bwcVersion) systemProperty("tests.old_cluster_version", bwcVersion) diff --git a/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java b/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java index 4f8abf4b8239..b4d178868811 100644 --- a/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java +++ b/modules/ingest-geoip/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/ingest/geoip/FullClusterRestartIT.java @@ -12,7 +12,9 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.distribution.DistributionType; @@ -31,6 +33,8 @@ import static org.hamcrest.Matchers.contains; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "we need to figure out the index migrations here for 9.0") public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { private static final boolean useFixture = Boolean.getBoolean("geoip_use_service") == false; diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java index cc757c413713..15e7299dc104 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderIT.java @@ -19,10 +19,8 @@ import org.elasticsearch.action.get.GetRequest; import 
org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -36,9 +34,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.RemoteTransportException; -import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.ClassRule; import java.io.IOException; @@ -47,7 +43,6 @@ import static org.elasticsearch.ingest.EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER; import static org.elasticsearch.ingest.geoip.EnterpriseGeoIpDownloaderTaskExecutor.MAXMIND_LICENSE_KEY_SETTING; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; public class EnterpriseGeoIpDownloaderIT extends ESIntegTestCase { @@ -155,31 +150,24 @@ private void configureDatabase(String databaseType) throws Exception { } private void createGeoIpPipeline(String pipelineName, String databaseType, String sourceField, String targetField) throws IOException { - final BytesReference bytes; - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.startObject(); + putJsonPipeline(pipelineName, (builder, params) -> { + builder.field("description", "test"); + builder.startArray("processors"); { - builder.field("description", "test"); - builder.startArray("processors"); + builder.startObject(); { - builder.startObject(); + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", sourceField); - builder.field("target_field", targetField); - builder.field("database_file", databaseType + ".mmdb"); - } - builder.endObject(); + builder.field("field", sourceField); + builder.field("target_field", targetField); + builder.field("database_file", databaseType + ".mmdb"); } builder.endObject(); } - builder.endArray(); + builder.endObject(); } - builder.endObject(); - bytes = BytesReference.bytes(builder); - } - assertAcked(clusterAdmin().putPipeline(new PutPipelineRequest(pipelineName, bytes, XContentType.JSON)).actionGet()); + return builder.endArray(); + }); } private String ingestDocument(String indexName, String pipelineName, String sourceField) { diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index d994bd70eb7a..41d711be2dee 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -530,91 +530,84 @@ private void putGeoIpPipeline(String pipelineId) throws IOException { * @throws IOException */ private void putGeoIpPipeline(String pipelineId, boolean downloadDatabaseOnPipelineCreation) throws IOException { - BytesReference bytes; - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.startObject(); + putJsonPipeline(pipelineId, ((builder, params) -> { + 
builder.startArray("processors"); { - builder.startArray("processors"); + /* + * First we add a non-geo pipeline with a random field value. This is purely here so that each call to this method + * creates a pipeline that is unique. Creating a pipeline twice with the same ID and exact same bytes + * results in a no-op, meaning that the pipeline won't actually be updated and won't actually trigger all of the + * things we expect it to. + */ + builder.startObject(); { - /* - * First we add a non-geo pipeline with a random field value. This is purely here so that each call to this method - * creates a pipeline that is unique. Creating a pipeline twice with the same ID and exact same bytes - * results in a no-op, meaning that the pipeline won't actually be updated and won't actually trigger all of the - * things we expect it to. - */ - builder.startObject(); + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); { - builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); - { - builder.field("randomField", randomAlphaOfLength(20)); - } - builder.endObject(); + builder.field("randomField", randomAlphaOfLength(20)); } builder.endObject(); + } + builder.endObject(); - builder.startObject(); + builder.startObject(); + { + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-city"); - builder.field("database_file", "GeoLite2-City.mmdb"); - if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { - builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); - } + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); + if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { + builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); } - builder.endObject(); } builder.endObject(); - builder.startObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-country"); - builder.field("database_file", "GeoLite2-Country.mmdb"); - if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { - builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); } + builder.field("field", "ip"); + builder.field("target_field", "ip-country"); + builder.field("database_file", "GeoLite2-Country.mmdb"); + if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { + builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); } - builder.endObject(); } builder.endObject(); - builder.startObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-asn"); - builder.field("database_file", "GeoLite2-ASN.mmdb"); - if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { - builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); } + builder.field("field", "ip"); + builder.field("target_field", "ip-asn"); + builder.field("database_file", "GeoLite2-ASN.mmdb"); + if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { + builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); } -
builder.endObject(); } builder.endObject(); - builder.startObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-city"); - builder.field("database_file", "MyCustomGeoLite2-City.mmdb"); - if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { - builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); - } + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "MyCustomGeoLite2-City.mmdb"); + if (downloadDatabaseOnPipelineCreation == false || randomBoolean()) { + builder.field("download_database_on_pipeline_creation", downloadDatabaseOnPipelineCreation); } - builder.endObject(); } builder.endObject(); } - builder.endArray(); + builder.endObject(); } - builder.endObject(); - bytes = BytesReference.bytes(builder); - } - assertAcked(clusterAdmin().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); + return builder.endArray(); + })); } /** @@ -626,40 +619,33 @@ private void putNonGeoipPipeline(String pipelineId) throws IOException { * Adding the exact same pipeline twice is treated as a no-op. The random values that go into randomField make each pipeline * created by this method is unique to avoid this. */ - BytesReference bytes; - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.startObject(); + putJsonPipeline(pipelineId, ((builder, params) -> { + builder.startArray("processors"); { - builder.startArray("processors"); + builder.startObject(); { - builder.startObject(); - { - builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); - builder.field("randomField", randomAlphaOfLength(20)); - builder.endObject(); - } + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.field("randomField", randomAlphaOfLength(20)); builder.endObject(); - builder.startObject(); - { - builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); - builder.field("randomField", randomAlphaOfLength(20)); - builder.endObject(); - } + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.field("randomField", randomAlphaOfLength(20)); builder.endObject(); - builder.startObject(); - { - builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); - builder.field("randomField", randomAlphaOfLength(20)); - builder.endObject(); - } + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.field("randomField", randomAlphaOfLength(20)); builder.endObject(); } - builder.endArray(); + builder.endObject(); } - builder.endObject(); - bytes = BytesReference.bytes(builder); - } - assertAcked(clusterAdmin().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); + return builder.endArray(); + })); } private List getGeoIpTmpDirs() throws IOException { diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java index ec54317e144d..51ad7cedba98 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java +++ 
b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java @@ -19,8 +19,6 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; import java.io.IOException; @@ -29,7 +27,6 @@ import java.util.Map; import java.util.stream.Collectors; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -98,30 +95,23 @@ public void testStats() throws Exception { } private void putPipeline() throws IOException { - BytesReference bytes; - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.startObject(); + putJsonPipeline("_id", (builder, params) -> { + builder.startArray("processors"); { - builder.startArray("processors"); + builder.startObject(); { - builder.startObject(); + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-city"); - builder.field("database_file", "GeoLite2-City.mmdb"); - } - builder.endObject(); + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); } builder.endObject(); } - builder.endArray(); + builder.endObject(); } - builder.endObject(); - bytes = BytesReference.bytes(builder); - } - assertAcked(clusterAdmin().preparePutPipeline("_id", bytes, XContentType.JSON).get()); + return builder.endArray(); + }); } public static Map convertToMap(ToXContent part) throws IOException { diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java index f34f647a01e0..58fdc81b72ae 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java @@ -11,22 +11,16 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.NodeRoles; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; import java.util.Arrays; import java.util.Map; import static org.elasticsearch.test.NodeRoles.nonIngestNode; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; public class GeoIpProcessorNonIngestNodeIT extends AbstractGeoIpIT { @@ -43,53 +37,46 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { */ public void testLazyLoading() throws IOException { assumeFalse("https://github.com/elastic/elasticsearch/issues/37342", 
Constants.WINDOWS); - final BytesReference bytes; - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.startObject(); + putJsonPipeline("geoip", (builder, params) -> { + builder.field("description", "test"); + builder.startArray("processors"); { - builder.field("description", "test"); - builder.startArray("processors"); + builder.startObject(); { - builder.startObject(); + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-city"); - builder.field("database_file", "GeoLite2-City.mmdb"); - } - builder.endObject(); + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); } builder.endObject(); - builder.startObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-country"); - builder.field("database_file", "GeoLite2-Country.mmdb"); - } - builder.endObject(); + builder.field("field", "ip"); + builder.field("target_field", "ip-country"); + builder.field("database_file", "GeoLite2-Country.mmdb"); } builder.endObject(); - builder.startObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject("geoip"); { - builder.startObject("geoip"); - { - builder.field("field", "ip"); - builder.field("target_field", "ip-asn"); - builder.field("database_file", "GeoLite2-ASN.mmdb"); - } - builder.endObject(); + builder.field("field", "ip"); + builder.field("target_field", "ip-asn"); + builder.field("database_file", "GeoLite2-ASN.mmdb"); } builder.endObject(); } - builder.endArray(); + builder.endObject(); } - builder.endObject(); - bytes = BytesReference.bytes(builder); - } - assertAcked(clusterAdmin().putPipeline(new PutPipelineRequest("geoip", bytes, XContentType.JSON)).actionGet()); + return builder.endArray(); + }); // the geo-IP databases should not be loaded on any nodes as they are all non-ingest nodes Arrays.stream(internalCluster().getNodeNames()).forEach(node -> assertDatabaseLoadStatus(node, false)); diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index 64cd38c58482..d124770e33cc 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -18,7 +18,3 @@ restResources { include '_common', 'indices', 'index', 'cluster', 'nodes', 'get', 'ingest' } } - -tasks.named("yamlRestTestV7CompatTransform").configure {task -> - task.addAllowedWarningRegex("setting \\[ecs\\] is deprecated as ECS format is the default and only option") -} diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index c36275699e21..3cbcabed20a9 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -26,7 +26,3 @@ restResources { } } -tasks.named("yamlRestTestV7CompatTransform").configure {task -> - task.addAllowedWarningRegex("\\[types removal\\].*") - task.replaceValueInMatch("responses.1.error.root_cause.0.type", "x_content_e_o_f_exception", "Multi-search template with errors") -} diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index cc557ac2289f..0b2882934a12 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -61,48 +61,6 @@ tasks.named("test").configure { jvmArgs '-XX:-OmitStackTraceInFastThrow', '-XX:-HeapDumpOnOutOfMemoryError' } 
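Before the remaining painless build-script cleanup below, it is worth pinning down the refactor that dominates the ingest test diffs above: hand-built PutPipelineRequest plumbing (BytesArray/BytesReference plus an explicit XContentType) is replaced by a putJsonPipeline helper that accepts either a raw JSON string or an XContentBuilder-writing lambda. A minimal sketch of what such a helper could look like, reconstructed from the call sites above (assumptions: the helper lives on a shared test base class and the lambda parameter fits the ToXContent shape; the actual test-framework implementation may differ):

import java.io.IOException;

import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xcontent.json.JsonXContent;

public abstract class PipelineTestHelpers extends ESIntegTestCase {

    // String form: the body is a complete JSON pipeline definition.
    protected void putJsonPipeline(String id, String jsonBody) {
        clusterAdmin().putPipeline(new PutPipelineRequest(id, new BytesArray(jsonBody), XContentType.JSON)).actionGet();
    }

    // Builder form: the caller writes the pipeline's fields (e.g. the "processors"
    // array) and returns the builder; the helper supplies the enclosing object and
    // the request plumbing, so call sites shed several levels of nesting.
    protected void putJsonPipeline(String id, ToXContent body) throws IOException {
        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
            builder.startObject();
            body.toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
            putJsonPipeline(id, BytesReference.bytes(builder).utf8ToString());
        }
    }
}

The lambdas in the diffs above match this shape: they receive the builder already positioned inside the root object and return it after closing the processors array.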
-tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'painless/20_scriptfield/Scripted Field Doing Compare (fields api)', - 'painless/70_execute_painless_scripts/Execute with double field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with double field context (single-value, fields api)', - '70_execute_painless_scripts/Execute with geo point field context (multi-value, fields api)', - '70_execute_painless_scripts/Execute with ip field context (single-value, fields api)', - '70_execute_painless_scripts/Execute with boolean field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with boolean field context (multi-value, fields api)', - 'painless/40_fields_api/date to long', - 'painless/130_metric_agg/Scripted Metric Agg Total (fields api)', - 'painless/70_execute_painless_scripts/Execute with keyword field context (multi-value, fields api)', - 'painless/100_terms_agg/Double Value Script with doc notation (fields api)', - 'painless/100_terms_agg/Long Value Script with doc notation (fields api)', - 'painless/20_scriptfield/Access a date (fields api)', - 'painless/70_execute_painless_scripts/Execute with date field context (multi-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with keyword field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with long field context (multi-value, fields api)', - 'painless/20_scriptfield/Scripted Field (fields api)', - 'painless/70_execute_painless_scripts/Execute with long field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with geo point field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with date field context (single-value, fields api)', - 'painless/40_fields_api/missing field', - 'painless/40_fields_api/sort script fields api', - 'painless/20_scriptfield/Access many dates (fields api)', - 'painless/70_execute_painless_scripts/Execute with long field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with geo point field context (single-value, fields api)', - 'painless/70_execute_painless_scripts/Execute with date field context (single-value, fields api)', - 'painless/40_fields_api/missing field', - 'painless/40_fields_api/sort script fields api', - 'painless/20_scriptfield/Access many dates (fields api)', - 'painless/100_terms_agg/String Value Script with doc notation (fields api)', - 'painless/40_fields_api/string to long and bigint', - 'painless/40_fields_api/boolean to long and bigint', - 'painless/40_fields_api/script fields api for dates', - 'painless/70_execute_painless_scripts/Execute with double field context (multi-value, fields api)', - 'painless/40_fields_api/filter script fields api', - 'painless/40_fields_api/script score fields api', - 'painless/70_mov_fn_agg/*' // Agg moved to a module. 
- ].join(',') -} - esplugin.bundleSpec.into("spi") { from(configurations.spi) } diff --git a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java index 1bd6468c562f..99fb0edd4334 100644 --- a/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java +++ b/modules/lang-painless/src/internalClusterTest/java/org/elasticsearch/painless/action/CrossClusterPainlessExecuteIT.java @@ -175,7 +175,7 @@ private void setupTwoClusters() throws Exception { assertFalse( client(REMOTE_CLUSTER).admin() .cluster() - .prepareHealth(REMOTE_INDEX) + .prepareHealth(TEST_REQUEST_TIMEOUT, REMOTE_INDEX) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java index 2808dae31239..f41d365f305b 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapper.java @@ -352,7 +352,7 @@ private static int getLevels(int treeLevels, double precisionInMeters, int defau public LegacyGeoShapeFieldMapper build(MapperBuilderContext context) { LegacyGeoShapeParser parser = new LegacyGeoShapeParser(); GeoShapeFieldType ft = buildFieldType(parser, context); - return new LegacyGeoShapeFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, parser, this); + return new LegacyGeoShapeFieldMapper(leafName(), ft, builderParams(this, context), parser, this); } } @@ -537,20 +537,18 @@ public PrefixTreeStrategy resolvePrefixTreeStrategy(String strategyName) { public LegacyGeoShapeFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, LegacyGeoShapeParser parser, Builder builder ) { super( simpleName, mappedFieldType, + builderParams, builder.ignoreMalformed.get(), builder.coerce.get(), builder.ignoreZValue.get(), builder.orientation.get(), - multiFields, - copyTo, parser ); this.indexCreatedVersion = builder.indexCreatedVersion; diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java index 4fa1d7b7a310..0e9d7ca5f15c 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.GeometryCollection; import org.elasticsearch.geometry.Line; @@ -342,6 +343,8 @@ public void testParsePolygon() throws IOException, ParseException { assertGeometryEquals(p, polygonGeoJson, false); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParse3DPolygon() throws IOException, ParseException { 
XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() .startObject() diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java index 6e8a61277ccc..74340e705b57 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Line; import org.elasticsearch.geometry.MultiLine; @@ -301,6 +302,8 @@ public void testParseMixedDimensionPolyWithHole() throws IOException, ParseExcep assertThat(e, hasToString(containsString("coordinate dimensions do not match"))); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0)); @@ -334,6 +337,8 @@ public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParsePolyWithStoredZ() throws IOException { List shellCoordinates = new ArrayList<>(); shellCoordinates.add(new Coordinate(100, 0, 0)); @@ -357,6 +362,8 @@ public void testParsePolyWithStoredZ() throws IOException { assertEquals(shapeBuilder.numDimensions(), 3); } + @UpdateForV9 + @AwaitsFix(bugUrl = "this test is using pre 8.0.0 index versions so needs to be removed or updated") public void testParseOpenPolygon() throws IOException { String openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))"; diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java index 0a0bb12bedba..407f372bee26 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldMapperTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; +import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoUtils; @@ -19,6 +20,7 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.geometry.Point; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -53,6 +55,8 @@ import static org.mockito.Mockito.when; @SuppressWarnings("deprecation") +@UpdateForV9 +@AwaitsFix(bugUrl = "this is 
testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldMapperTests extends MapperTestCase { @Override diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java index dc74b9cd295c..a64352c5306e 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/mapper/LegacyGeoShapeFieldTypeTests.java @@ -7,7 +7,9 @@ */ package org.elasticsearch.legacygeo.mapper; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.common.geo.SpatialStrategy; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.FieldTypeTestCase; @@ -20,6 +22,8 @@ import java.util.List; import java.util.Map; +@UpdateForV9 +@LuceneTestCase.AwaitsFix(bugUrl = "this is testing legacy functionality so can likely be removed in 9.0") public class LegacyGeoShapeFieldTypeTests extends FieldTypeTestCase { /** diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index b3cd3586fca5..a616fe9c20c2 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperBuilderContext; -import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.StringFieldType; import org.elasticsearch.index.mapper.StringStoredFieldFieldLoader; @@ -139,13 +138,11 @@ private MatchOnlyTextFieldType buildFieldType(MapperBuilderContext context) { @Override public MatchOnlyTextFieldMapper build(MapperBuilderContext context) { MatchOnlyTextFieldType tft = buildFieldType(context); - MultiFields multiFields = multiFieldsBuilder.build(this, context); return new MatchOnlyTextFieldMapper( leafName(), Defaults.FIELD_TYPE, tft, - multiFields, - copyTo, + builderParams(this, context), context.isSourceSynthetic(), this ); @@ -382,12 +379,11 @@ private MatchOnlyTextFieldMapper( String simpleName, FieldType fieldType, MatchOnlyTextFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean storeSource, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); assert mappedFieldType.getTextSearchInfo().isTokenized(); assert mappedFieldType.hasDocValues() == false; this.fieldType = freezeAndDeduplicateFieldType(fieldType); @@ -436,22 +432,14 @@ public MatchOnlyTextFieldType fieldType() { } @Override - protected SyntheticSourceMode syntheticSourceMode() { - return SyntheticSourceMode.NATIVE; - } - - @Override - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - if (copyTo.copyToFields().isEmpty() != true) { - throw new IllegalArgumentException( - "field [" + fullPath() + "] of type [" + 
typeName() + "] doesn't support synthetic source because it declares copy_to" - ); - } - return new StringStoredFieldFieldLoader(fieldType().storedFieldNameForSyntheticSource(), leafName()) { + protected SyntheticSourceSupport syntheticSourceSupport() { + var loader = new StringStoredFieldFieldLoader(fieldType().storedFieldNameForSyntheticSource(), leafName()) { @Override protected void write(XContentBuilder b, Object value) throws IOException { b.value((String) value); } }; + + return new SyntheticSourceSupport.Native(loader); } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java index bd3845e1ee18..0b475641e429 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureFieldMapper.java @@ -98,8 +98,7 @@ public RankFeatureFieldMapper build(MapperBuilderContext context) { positiveScoreImpact.getValue(), nullValue.getValue() ), - multiFieldsBuilder.build(this, context), - copyTo, + builderParams(this, context), positiveScoreImpact.getValue(), nullValue.getValue() ); @@ -172,12 +171,11 @@ public Query termQuery(Object value, SearchExecutionContext context) { private RankFeatureFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean positiveScoreImpact, Float nullValue ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); this.positiveScoreImpact = positiveScoreImpact; this.nullValue = nullValue; } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java index c45065037b5a..366145be02d8 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureMetaFieldMapper.java @@ -12,7 +12,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; @@ -75,9 +74,4 @@ private RankFeatureMetaFieldMapper() { protected String contentType() { return CONTENT_TYPE; } - - @Override - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return SourceLoader.SyntheticFieldLoader.NOTHING; - } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java index e6cb3010f996..5b1d35ec03c0 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java @@ -66,8 +66,7 @@ public RankFeaturesFieldMapper build(MapperBuilderContext context) { return new RankFeaturesFieldMapper( leafName(), new 
RankFeaturesFieldType(context.buildFullName(leafName()), meta.getValue(), positiveScoreImpact.getValue()), - multiFieldsBuilder.build(this, context), - copyTo, + builderParams(this, context), positiveScoreImpact.getValue() ); } @@ -122,11 +121,10 @@ private static String indexedValueForSearch(Object value) { private RankFeaturesFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean positiveScoreImpact ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); this.positiveScoreImpact = positiveScoreImpact; } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index c346a7d66914..ac236d13cc58 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.SimpleMappedFieldType; import org.elasticsearch.index.mapper.SortedNumericDocValuesSyntheticFieldLoader; -import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.TimeSeriesParams; @@ -197,14 +196,7 @@ public ScaledFloatFieldMapper build(MapperBuilderContext context) { metric.getValue(), indexMode ); - return new ScaledFloatFieldMapper( - leafName(), - type, - multiFieldsBuilder.build(this, context), - copyTo, - context.isSourceSynthetic(), - this - ); + return new ScaledFloatFieldMapper(leafName(), type, builderParams(this, context), context.isSourceSynthetic(), this); } } @@ -470,12 +462,11 @@ public String toString() { private ScaledFloatFieldMapper( String simpleName, ScaledFloatFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, boolean isSourceSynthetic, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo); + super(simpleName, mappedFieldType, builderParams); this.isSourceSynthetic = isSourceSynthetic; this.indexed = builder.indexed.getValue(); this.hasDocValues = builder.hasDocValues.getValue(); @@ -713,32 +704,19 @@ public int docValueCount() { } @Override - protected SyntheticSourceMode syntheticSourceMode() { - return SyntheticSourceMode.NATIVE; - } + protected SyntheticSourceSupport syntheticSourceSupport() { + if (hasDocValues) { + var loader = new SortedNumericDocValuesSyntheticFieldLoader(fullPath(), leafName(), ignoreMalformed.value()) { + @Override + protected void writeValue(XContentBuilder b, long value) throws IOException { + b.value(decodeForSyntheticSource(value, scalingFactor)); + } + }; - @Override - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - if (hasDocValues == false) { - throw new IllegalArgumentException( - "field [" - + fullPath() - + "] of type [" - + typeName() - + "] doesn't support synthetic source because it doesn't have doc values" - ); - } - if (copyTo.copyToFields().isEmpty() != true) { - throw new IllegalArgumentException( - "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" - ); + return new 
SyntheticSourceSupport.Native(loader); } - return new SortedNumericDocValuesSyntheticFieldLoader(fullPath(), leafName(), ignoreMalformed.value()) { - @Override - protected void writeValue(XContentBuilder b, long value) throws IOException { - b.value(decodeForSyntheticSource(value, scalingFactor)); - } - }; + + return super.syntheticSourceSupport(); } /** diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java index d521f9b2d2a3..57ac8fdfbb02 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/SearchAsYouTypeFieldMapper.java @@ -262,11 +262,10 @@ public SearchAsYouTypeFieldMapper build(MapperBuilderContext context) { return new SearchAsYouTypeFieldMapper( leafName(), ft, - copyTo, + builderParams(this, context), indexAnalyzers, prefixFieldMapper, shingleFieldMappers, - multiFieldsBuilder.build(this, context), this ); } @@ -498,7 +497,7 @@ static final class PrefixFieldMapper extends FieldMapper { final FieldType fieldType; PrefixFieldMapper(FieldType fieldType, PrefixFieldType mappedFieldType) { - super(mappedFieldType.name(), mappedFieldType, MultiFields.empty(), CopyTo.empty()); + super(mappedFieldType.name(), mappedFieldType, BuilderParams.empty()); this.fieldType = Mapper.freezeAndDeduplicateFieldType(fieldType); } @@ -537,7 +536,7 @@ static final class ShingleFieldMapper extends FieldMapper { private final FieldType fieldType; ShingleFieldMapper(FieldType fieldType, ShingleFieldType mappedFieldtype) { - super(mappedFieldtype.name(), mappedFieldtype, MultiFields.empty(), CopyTo.empty()); + super(mappedFieldtype.name(), mappedFieldtype, BuilderParams.empty()); this.fieldType = freezeAndDeduplicateFieldType(fieldType); } @@ -672,14 +671,13 @@ public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRew public SearchAsYouTypeFieldMapper( String simpleName, SearchAsYouTypeFieldType mappedFieldType, - CopyTo copyTo, + BuilderParams builderParams, Map indexAnalyzers, PrefixFieldMapper prefixField, ShingleFieldMapper[] shingleFields, - MultiFields multiFields, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); this.prefixField = prefixField; this.shingleFields = shingleFields; this.maxShingleSize = builder.maxShingleSize.getValue(); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java index 9db677ddddff..fa0a96a548a9 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/TokenCountFieldMapper.java @@ -87,7 +87,7 @@ public TokenCountFieldMapper build(MapperBuilderContext context) { nullValue.getValue(), meta.getValue() ); - return new TokenCountFieldMapper(leafName(), ft, multiFieldsBuilder.build(this, context), copyTo, this); + return new TokenCountFieldMapper(leafName(), ft, builderParams(this, context), this); } } @@ -135,14 +135,8 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) private final boolean enablePositionIncrements; private 
final Integer nullValue; - protected TokenCountFieldMapper( - String simpleName, - MappedFieldType defaultFieldType, - MultiFields multiFields, - CopyTo copyTo, - Builder builder - ) { - super(simpleName, defaultFieldType, multiFields, copyTo); + protected TokenCountFieldMapper(String simpleName, MappedFieldType defaultFieldType, BuilderParams builderParams, Builder builder) { + super(simpleName, defaultFieldType, builderParams); this.analyzer = builder.analyzer.getValue(); this.enablePositionIncrements = builder.enablePositionIncrements.getValue(); this.nullValue = builder.nullValue.getValue(); diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java index 56b9bb7f748b..765e72091a1b 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java @@ -470,12 +470,7 @@ private void mapping(XContentBuilder b) throws IOException { @Override public List invalidExample() throws IOException { - return List.of( - new SyntheticSourceInvalidExample( - equalTo("field [field] of type [scaled_float] doesn't support synthetic source because it doesn't have doc values"), - b -> b.field("type", "scaled_float").field("scaling_factor", 10).field("doc_values", false) - ) - ); + return List.of(); } } diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml index 7d1d6e2edec3..b4ee226f7269 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/match_only_text/10_basic.yml @@ -351,3 +351,45 @@ tsdb: "@timestamp" : "2000-01-01T00:00:00.000Z" "dimension" : "a" foo: "Apache Lucene powers Elasticsearch" + +--- +synthetic_source with copy_to: + - requires: + cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] + reason: requires copy_to support in synthetic source + + - do: + indices.create: + index: synthetic_source_test + body: + mappings: + _source: + mode: synthetic + properties: + foo: + type: match_only_text + copy_to: copy + copy: + type: keyword + + - do: + index: + index: synthetic_source_test + id: "1" + refresh: true + body: + foo: "Apache Lucene powers Elasticsearch" + + - do: + search: + index: synthetic_source_test + body: + fields: ["copy"] + + - match: { "hits.total.value": 1 } + - match: + hits.hits.0._source.foo: "Apache Lucene powers Elasticsearch" + - match: + hits.hits.0.fields.copy.0: "Apache Lucene powers Elasticsearch" + + diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 844478c83e7c..3a1d8a396c4b 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -19,7 +19,3 @@ restResources { include '_common', 'bulk', 'cluster', 'get', 'nodes', 'indices', 'index', 'search' } } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("/30_inner_hits/profile fetch", "profile output has changed") -} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java 
b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java index 7e9b6916e99d..f6392f32a88d 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentIdFieldMapper.java @@ -88,7 +88,7 @@ public Object valueForDisplay(Object value) { } protected ParentIdFieldMapper(String name, boolean eagerGlobalOrdinals) { - super(name, new ParentIdFieldType(name, eagerGlobalOrdinals), MultiFields.empty(), CopyTo.empty(), false, null); + super(name, new ParentIdFieldType(name, eagerGlobalOrdinals), BuilderParams.empty()); } @Override diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java index dc760c0b07b7..ccb67f5c51ac 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java @@ -210,7 +210,7 @@ protected ParentJoinFieldMapper( boolean eagerGlobalOrdinals, List relations ) { - super(simpleName, mappedFieldType, MultiFields.empty(), CopyTo.empty(), false, null); + super(simpleName, mappedFieldType, BuilderParams.empty()); this.parentIdFields = parentIdFields; this.eagerGlobalOrdinals = eagerGlobalOrdinals; this.relations = relations; diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index b9b257a42e05..041fbb8bce34 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -23,7 +23,3 @@ restResources { include '_common', 'get', 'indices', 'index', 'search', 'msearch' } } - -tasks.named("yamlRestTestV7CompatTransform").configure{ task -> - task.addAllowedWarningRegex("\\[types removal\\].*") -} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index ad936a5491b6..576ea4dbd5d2 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -137,8 +137,6 @@ protected Parameter[] getParameters() { @Override public PercolatorFieldMapper build(MapperBuilderContext context) { PercolatorFieldType fieldType = new PercolatorFieldType(context.buildFullName(leafName()), meta.getValue()); - // TODO should percolator even allow multifields? 
- MultiFields multiFields = multiFieldsBuilder.build(this, context); context = context.createChildContext(leafName(), null); KeywordFieldMapper extractedTermsField = createExtractQueryFieldBuilder( EXTRACTED_TERMS_FIELD_NAME, @@ -165,8 +163,7 @@ public PercolatorFieldMapper build(MapperBuilderContext context) { return new PercolatorFieldMapper( leafName(), fieldType, - multiFields, - copyTo, + builderParams(this, context), searchExecutionContext, extractedTermsField, extractionResultField, @@ -375,8 +372,7 @@ static Tuple, Map>> extractTermsAndRanges(In PercolatorFieldMapper( String simpleName, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, Supplier searchExecutionContext, KeywordFieldMapper queryTermsField, KeywordFieldMapper extractionResultField, @@ -387,7 +383,7 @@ static Tuple, Map>> extractTermsAndRanges(In IndexVersion indexCreatedVersion, Supplier clusterTransportVersion ) { - super(simpleName, mappedFieldType, multiFields, copyTo); + super(simpleName, mappedFieldType, builderParams); this.searchExecutionContext = searchExecutionContext; this.queryTermsField = queryTermsField; this.extractionResultField = extractionResultField; diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 9cd7963224cf..9e1e1e842ba5 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -160,30 +160,3 @@ if (OS.current() == OS.WINDOWS) { } } } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("reindex/20_validation/reindex without source gives useful error message", "exception with a type. Not much benefit adding _doc there.") - task.skipTest("update_by_query/20_validation/update_by_query without source gives useful error message", "exception with a type. Not much benefit adding _doc there.") - - // these tests are all relying on a call to refresh all indices, when they could easily be changed - // in 7.x to call the specific index they want to refresh. 
- // See https://github.com/elastic/elasticsearch/issues/81188 - task.skipTest("delete_by_query/70_throttle/Rethrottle to -1 which turns off throttling", "test relies on system index being non-hidden") - task.skipTest("delete_by_query/80_slices/Multiple slices with rethrottle", "test relies on system index being non-hidden") - task.skipTest("delete_by_query/80_slices/Multiple slices with wait_for_completion=false", "test relies on system index being non-hidden") - task.skipTest("reindex/80_slices/Multiple slices with rethrottle", "test relies on system index being non-hidden") - task.skipTest("reindex/80_slices/Multiple slices with wait_for_completion=false", "test relies on system index being non-hidden") - task.skipTest("update_by_query/70_slices/Multiple slices with rethrottle", "test relies on system index being non-hidden") - task.skipTest("update_by_query/70_slices/Multiple slices with wait_for_completion=false", "test relies on system index being non-hidden") - - task.addAllowedWarningRegex("\\[types removal\\].*") -} - -tasks.named("yamlRestTestV7CompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - 'update_by_query/80_scripting/Can\'t change _id', - 'update_by_query/80_scripting/Set unsupported operation type', - 'update_by_query/80_scripting/Setting bogus context is an error', - - ].join(',') -} diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index ac850e991296..bf34c322c1a9 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -88,8 +88,8 @@ public void testStartMigrationAndImmediatelyCheckStatus() throws Exception { ensureGreen(); - PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); // Start the migration and *immediately* request the status. We're trying to detect a race condition with this test, so we need to // do this as fast as possible, but not before the request to start the migration completes. 
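A note on the mechanical change repeated throughout these test diffs: the zero-argument admin request builders and no-argument request constructors used to fall back to an implicit default master-node timeout, and this changeset threads an explicit timeout through every call site instead. Below is a minimal sketch of the new call shapes, assuming the TEST_REQUEST_TIMEOUT constant exposed by the Elasticsearch test base classes; the wrapper class and test method are illustrative, not part of this diff:

import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusRequest;
import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeRequest;
import org.elasticsearch.test.ESIntegTestCase;

public class ExplicitTimeoutPatternIT extends ESIntegTestCase {
    public void testExplicitTimeouts() {
        // Builders now take the timeout as their first argument.
        assertNotNull(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState());
        assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForYellowStatus().get().isTimedOut());

        // Request objects take the timeout in their constructor.
        PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT);
        GetFeatureUpgradeStatusRequest statusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT);

        // Settings updates carry two timeouts: the master-node timeout, then the ack timeout.
        var updateBuilder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
    }
}

Requiring the timeout at every call site is what produces the long tail of one-line edits in the files that follow.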
@@ -170,7 +170,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { postUpgradeHookCalled.set(true); }); - PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); assertThat(migrationResponse.getReason(), nullValue()); assertThat(migrationResponse.getElasticsearchException(), nullValue()); @@ -180,7 +180,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { .collect(Collectors.toSet()); assertThat(migratingFeatures, hasItem(FEATURE_NAME)); - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); // The feature upgrade may take longer than ten seconds when tests are running // in parallel, so we give assertBusy a sixty-second timeout. assertBusy(() -> { @@ -196,7 +196,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { assertTrue("the pre-migration hook wasn't actually called", preUpgradeHookCalled.get()); assertTrue("the post-migration hook wasn't actually called", postUpgradeHookCalled.get()); - Metadata finalMetadata = clusterAdmin().prepareState().get().getState().metadata(); + Metadata finalMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); // Check that the results metadata is what we expect. FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); @@ -246,12 +246,12 @@ public void testMigrateIndexWithWriteBlock() throws Exception { updateIndexSettings(Settings.builder().put("index.blocks.write", true), indexName); ensureGreen(); - client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()).get(); + client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT)).get(); assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); @@ -299,7 +299,7 @@ public void onFailure(Exception e) { fail("cluster state update failed, see log for details"); } - PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); // Make sure we actually started the migration assertTrue( @@ -309,7 +309,7 @@ public void onFailure(Exception e) { // Now wait for the migration to finish (otherwise the test infra explodes) assertBusy(() -> { - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); GetFeatureUpgradeStatusResponse statusResp = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest).get(); logger.info(Strings.toString(statusResp)); 
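            // assertBusy retries this block with backoff; the migration runs asynchronously, so
            // the reported status only settles at NO_MIGRATION_NEEDED once it has completed.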
assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); @@ -337,8 +337,10 @@ private void migrateWithTemplatesV1(String templatePrefix, SystemIndexDescriptor ensureGreen(); - PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()) - .get(); + PostFeatureUpgradeResponse migrationResponse = client().execute( + PostFeatureUpgradeAction.INSTANCE, + new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT) + ).get(); assertTrue(migrationResponse.isAccepted()); } @@ -349,7 +351,7 @@ public void testBailOnMigrateWithTemplatesV1() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.ERROR)); @@ -364,7 +366,7 @@ public void testMigrateWithTemplatesV1() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); @@ -426,8 +428,10 @@ private void migrateWithTemplatesV2(String prefix, SystemIndexDescriptor... desc ensureGreen(); - PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()) - .get(); + PostFeatureUpgradeResponse migrationResponse = client().execute( + PostFeatureUpgradeAction.INSTANCE, + new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT) + ).get(); assertTrue(migrationResponse.isAccepted()); } @@ -437,7 +441,7 @@ public void testBailOnMigrateWithTemplatesV2() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.ERROR)); @@ -452,7 +456,7 @@ public void testMigrateWithTemplatesV2() throws Exception { assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResp = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); logger.info(Strings.toString(statusResp)); assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java index 8f9c2b7f3410..f02695c63a7e 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java @@ -176,7 +176,7 @@ public void testMultipleFeatureMigration() throws Exception { hooksCalled.countDown(); }); - PostFeatureUpgradeRequest 
migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); assertThat(migrationResponse.getReason(), nullValue()); assertThat(migrationResponse.getElasticsearchException(), nullValue()); @@ -189,7 +189,7 @@ public void testMultipleFeatureMigration() throws Exception { // wait for all the plugin methods to have been called before assertBusy since that will exponentially backoff assertThat(hooksCalled.await(30, TimeUnit.SECONDS), is(true)); - GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT); assertBusy(() -> { GetFeatureUpgradeStatusResponse statusResponse = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest) .get(); @@ -203,7 +203,7 @@ public void testMultipleFeatureMigration() throws Exception { assertTrue("the second plugin's pre-migration hook wasn't actually called", secondPluginPreMigrationHookCalled.get()); assertTrue("the second plugin's post-migration hook wasn't actually called", secondPluginPostMigrationHookCalled.get()); - Metadata finalMetadata = clusterAdmin().prepareState().get().getState().metadata(); + Metadata finalMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); // Check that the results metadata is what we expect FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); @@ -264,12 +264,12 @@ public void testMultipleFeatureMigration() throws Exception { .setAliasName(".second-internal-managed-alias") .setPrimaryIndex(".second-int-man-old") .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) - .setSettings(createSettings(IndexVersions.V_7_0_0, 0)) + .setSettings(createSettings(IndexVersions.MINIMUM_COMPATIBLE, 0)) .setMappings(createMapping(true, true)) .setOrigin(ORIGIN) .setVersionMetaKey(VERSION_META_KEY) .setAllowedElasticProductOrigins(Collections.emptyList()) - .setMinimumNodeVersion(Version.V_7_0_0) + .setMinimumNodeVersion(Version.CURRENT.minimumCompatibilityVersion()) .setPriorSystemIndexDescriptors(Collections.emptyList()) .build(); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java index 47c6e8faf15b..6484d483bbcd 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java @@ -85,7 +85,7 @@ public void testSystemIndexMigrationCanBeInterruptedWithShutdown() throws Except clusterService.addListener(clusterStateListener); // create task by calling API - final PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(); + final PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(TEST_REQUEST_TIMEOUT); client().execute(PostFeatureUpgradeAction.INSTANCE, req); logger.info("migrate feature api called"); @@ -101,12 +101,12 @@ public Settings onNodeStopped(String nodeName) throws Exception { assertBusy(() -> { // Wait for the node we restarted to completely rejoin the cluster - ClusterState clusterState = 
clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat("expected restarted node to rejoin cluster", clusterState.getNodes().size(), equalTo(2)); GetFeatureUpgradeStatusResponse statusResponse = client().execute( GetFeatureUpgradeStatusAction.INSTANCE, - new GetFeatureUpgradeStatusRequest() + new GetFeatureUpgradeStatusRequest(TEST_REQUEST_TIMEOUT) ).get(); assertThat( "expected migration to fail due to restarting only data node", diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java index a2911090ab93..4c914764cdb5 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java @@ -15,9 +15,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.Operation.Origin; @@ -35,7 +32,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.xcontent.XContentType; import org.hamcrest.Matcher; import org.junit.Before; @@ -47,7 +43,6 @@ import java.util.stream.IntStream; import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; @@ -231,14 +226,13 @@ public void testReindexCancel() throws Exception { } public void testUpdateByQueryCancel() throws Exception { - BytesReference pipeline = new BytesArray(""" + putJsonPipeline("set-processed", """ { "description" : "sets processed to true", "processors" : [ { "test" : {} } ] }"""); - assertAcked(clusterAdmin().preparePutPipeline("set-processed", pipeline, XContentType.JSON).get()); testCancel( UpdateByQueryAction.INSTANCE, @@ -250,7 +244,7 @@ public void testUpdateByQueryCancel() throws Exception { equalTo("update-by-query [" + INDEX + "]") ); - assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("set-processed")).get()); + deletePipeline("set-processed"); } public void testDeleteByQueryCancel() throws Exception { @@ -279,14 +273,13 @@ public void testReindexCancelWithWorkers() throws Exception { } public void testUpdateByQueryCancelWithWorkers() throws Exception { - BytesReference pipeline = new BytesArray(""" + putJsonPipeline("set-processed", """ { "description" : "sets processed to true", "processors" : [ { "test" : {} } ] }"""); - assertAcked(clusterAdmin().preparePutPipeline("set-processed", pipeline, XContentType.JSON).get()); testCancel( UpdateByQueryAction.INSTANCE, @@ -298,7 +291,7 @@ public void testUpdateByQueryCancelWithWorkers() throws Exception { equalTo("update-by-query [" + INDEX + "]") ); - assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("set-processed")).get()); + 
deletePipeline("set-processed"); } public void testDeleteByQueryCancelWithWorkers() throws Exception { diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 1ab370ad203f..7916bb5942fc 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -85,6 +85,7 @@ import java.util.stream.StreamSupport; import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_TOTAL; +import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomNonDataPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -428,12 +429,7 @@ public void testEnforcedCooldownPeriod() throws IOException { ); repository.blobStore() .blobContainer(repository.basePath()) - .writeBlobAtomic( - randomNonDataPurpose(), - BlobStoreRepository.INDEX_FILE_PREFIX + modifiedRepositoryData.getGenId(), - serialized, - true - ); + .writeBlobAtomic(randomNonDataPurpose(), getRepositoryDataBlobName(modifiedRepositoryData.getGenId()), serialized, true); final String newSnapshotName = "snapshot-new"; final long beforeThrottledSnapshot = repository.threadPool().relativeTimeInNanos(); diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 3fe2f9d9bae4..3537d430e212 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -33,11 +33,6 @@ dependencies { internalClusterTestImplementation project(':test:fixtures:url-fixture') } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("repository_url/10_basic/Restore with repository-url using file://", "Error message has changed") - task.skipTest("repository_url/10_basic/Restore with repository-url using http://", "Error message has changed") -} - tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( 'javax.servlet.ServletContextEvent', diff --git a/modules/runtime-fields-common/build.gradle b/modules/runtime-fields-common/build.gradle index 5a2d268cf7a4..f9485b6ed302 100644 --- a/modules/runtime-fields-common/build.gradle +++ b/modules/runtime-fields-common/build.gradle @@ -21,10 +21,3 @@ dependencies { api project(':libs:elasticsearch-grok') api project(':libs:elasticsearch-dissect') } - -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("runtime_fields/100_geo_point/fetch fields from source", "Format changed. Old format was a bug.") - task.skipTest("runtime_fields/101_geo_point_from_source/fetch fields from source", "Format changed. Old format was a bug.") - task.skipTest("runtime_fields/102_geo_point_source_in_query/fetch fields from source", "Format changed. Old format was a bug.") - task.skipTest("runtime_fields/103_geo_point_calculated_at_index/fetch fields from source", "Format changed. 
Old format was a bug.") -} diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/10_keyword.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/10_keyword.yml index 11214907eb17..8728d4ac413b 100644 --- a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/10_keyword.yml +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/10_keyword.yml @@ -19,7 +19,7 @@ setup: script: | Instant instant = Instant.ofEpochMilli(params._source.timestamp); ZonedDateTime dt = ZonedDateTime.ofInstant(instant, ZoneId.of("UTC")); - emit(dt.dayOfWeek.getDisplayName(TextStyle.FULL, Locale.ROOT)); + emit(dt.dayOfWeek.getDisplayName(TextStyle.FULL, Locale.ENGLISH)); # Test fetching many values day_of_week_letters: type: keyword @@ -218,7 +218,7 @@ setup: day_of_week: type: keyword script: | - emit(doc['timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.SHORT, Locale.ROOT)); + emit(doc['timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.SHORT, Locale.ENGLISH)); - do: search: index: sensor diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/13_keyword_calculated_at_index.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/13_keyword_calculated_at_index.yml index 4bedfa3e923a..c27ddab72bff 100644 --- a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/13_keyword_calculated_at_index.yml +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/13_keyword_calculated_at_index.yml @@ -28,7 +28,7 @@ setup: script: | Instant instant = Instant.ofEpochMilli(params._source.timestamp); ZonedDateTime dt = ZonedDateTime.ofInstant(instant, ZoneId.of("UTC")); - emit(dt.dayOfWeek.getDisplayName(TextStyle.FULL, Locale.ROOT)); + emit(dt.dayOfWeek.getDisplayName(TextStyle.FULL, Locale.ENGLISH)); # Test fetching many values day_of_week_letters: type: keyword diff --git a/muted-tests.yml b/muted-tests.yml index a8136219b3da..88d532d39e81 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -5,9 +5,6 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-async-query-api/line_17} issue: https://github.com/elastic/elasticsearch/issues/109260 -- class: "org.elasticsearch.index.engine.frozen.FrozenIndexIT" - issue: "https://github.com/elastic/elasticsearch/issues/109315" - method: "testTimestampFieldTypeExposedByAllIndicesServices" - class: "org.elasticsearch.analysis.common.CommonAnalysisClientYamlTestSuiteIT" issue: "https://github.com/elastic/elasticsearch/issues/109318" method: "test {yaml=analysis-common/50_char_filters/pattern_replace error handling (too complex pattern)}" @@ -38,9 +35,6 @@ tests: - class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT method: testCcsMinimizeRoundtripsIsFalse issue: https://github.com/elastic/elasticsearch/issues/101974 -- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT - method: testMinVersionAsOldVersion - issue: https://github.com/elastic/elasticsearch/issues/109454 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" @@ -101,9 +95,6 @@ tests: - class: 
org.elasticsearch.xpack.restart.CoreFullClusterRestartIT method: testSnapshotRestore {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/111799 -- class: org.elasticsearch.xpack.esql.qa.mixed.FieldExtractorIT - method: testScaledFloat - issue: https://github.com/elastic/elasticsearch/issues/112003 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/80_random_rerank_retriever/Random rerank retriever predictably shuffles results} issue: https://github.com/elastic/elasticsearch/issues/111999 @@ -124,9 +115,6 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testDeleteJobAsync issue: https://github.com/elastic/elasticsearch/issues/112212 -- class: org.elasticsearch.search.retriever.rankdoc.RankDocsSortBuilderTests - method: testEqualsAndHashcode - issue: https://github.com/elastic/elasticsearch/issues/112312 - class: org.elasticsearch.search.retriever.RankDocRetrieverBuilderIT method: testRankDocsRetrieverWithCollapse issue: https://github.com/elastic/elasticsearch/issues/112254 @@ -141,9 +129,6 @@ tests: - class: org.elasticsearch.xpack.ml.integration.MlJobIT method: testMultiIndexDelete issue: https://github.com/elastic/elasticsearch/issues/112381 -- class: org.elasticsearch.xpack.searchablesnapshots.cache.shared.NodesCachesStatsIntegTests - method: testNodesCachesStats - issue: https://github.com/elastic/elasticsearch/issues/112384 - class: org.elasticsearch.action.admin.cluster.stats.CCSTelemetrySnapshotTests method: testToXContent issue: https://github.com/elastic/elasticsearch/issues/112325 @@ -167,17 +152,68 @@ tests: - class: org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests method: testLoopOneAtATime issue: https://github.com/elastic/elasticsearch/issues/112471 -- class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests - method: testClientServiceMutualAuthentication - issue: https://github.com/elastic/elasticsearch/issues/112529 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 - class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT method: test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} issue: https://github.com/elastic/elasticsearch/issues/112575 -- class: org.elasticsearch.script.mustache.LangMustacheClientYamlTestSuiteIT - method: test {yaml=lang_mustache/50_multi_search_template/Multi-search template with errors} - issue: https://github.com/elastic/elasticsearch/issues/112580 +- class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests + method: testClientServiceMutualAuthentication + issue: https://github.com/elastic/elasticsearch/issues/112529 +- class: org.elasticsearch.search.basic.SearchWhileRelocatingIT + method: testSearchAndRelocateConcurrentlyRandomReplicas + issue: https://github.com/elastic/elasticsearch/issues/112515 +- class: org.elasticsearch.xpack.test.rest.XPackRestIT + method: test {p0=terms_enum/10_basic/Test search after on unconfigured constant keyword field} + issue: https://github.com/elastic/elasticsearch/issues/112624 +- class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT + method: testIndexPatternErrorMessageComparison_ESQL_SearchDSL + issue: https://github.com/elastic/elasticsearch/issues/112630 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testPutJob_GivenFarequoteConfig + issue: 
https://github.com/elastic/elasticsearch/issues/112382 +- class: org.elasticsearch.xpack.security.authc.kerberos.KerberosTicketValidatorTests + method: testWhenKeyTabWithInvalidContentFailsValidation + issue: https://github.com/elastic/elasticsearch/issues/112631 +- class: org.elasticsearch.xpack.security.authc.kerberos.KerberosTicketValidatorTests + method: testValidKebrerosTicket + issue: https://github.com/elastic/elasticsearch/issues/112632 +- class: org.elasticsearch.xpack.security.authc.kerberos.KerberosTicketValidatorTests + method: testKerbTicketGeneratedForDifferentServerFailsValidation + issue: https://github.com/elastic/elasticsearch/issues/112639 +- class: org.elasticsearch.packaging.test.PackagesSecurityAutoConfigurationTests + method: test20SecurityNotAutoConfiguredOnReInstallation + issue: https://github.com/elastic/elasticsearch/issues/112635 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testSelectInsertWithLcaseAndLengthWithOrderBy} + issue: https://github.com/elastic/elasticsearch/issues/112642 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testUcaseInline1} + issue: https://github.com/elastic/elasticsearch/issues/112641 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testUpperCasingTheSecondLetterFromTheRightFromFirstName} + issue: https://github.com/elastic/elasticsearch/issues/112640 +- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcSqlSpecIT + method: test {case-functions.testUcaseInline3} + issue: https://github.com/elastic/elasticsearch/issues/112643 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testDelete_multipleRequest + issue: https://github.com/elastic/elasticsearch/issues/112701 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testCreateJobInSharedIndexUpdatesMapping + issue: https://github.com/elastic/elasticsearch/issues/112729 +- class: org.elasticsearch.xpack.ml.integration.MlJobIT + method: testGetJob_GivenNoSuchJob + issue: https://github.com/elastic/elasticsearch/issues/112730 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/cluster/stats/line_1450} + issue: https://github.com/elastic/elasticsearch/issues/112732 +- class: org.elasticsearch.script.StatsSummaryTests + method: testEqualsAndHashCode + issue: https://github.com/elastic/elasticsearch/issues/112439 +- class: org.elasticsearch.snapshots.ConcurrentSnapshotsIT + method: testMasterFailoverOnFinalizationLoop + issue: https://github.com/elastic/elasticsearch/issues/112811 # Examples: # diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index 1c7db6d040be..eed88b3232a4 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -43,7 +43,3 @@ tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - task.skipTest("analysis_icu/10_basic/Normalization with deprecated unicodeSetFilter", "Cleanup versioned deprecations in analysis #41560") -} - diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java index 2d27447b618e..7a0caf56d606 100644 --- 
a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapper.java @@ -336,15 +336,7 @@ public ICUCollationKeywordFieldMapper build(MapperBuilderContext context) { ignoreAbove.getValue(), meta.getValue() ); - return new ICUCollationKeywordFieldMapper( - leafName(), - buildFieldType(), - ft, - multiFieldsBuilder.build(this, context), - copyTo, - collator, - this - ); + return new ICUCollationKeywordFieldMapper(leafName(), buildFieldType(), ft, builderParams(this, context), collator, this); } } @@ -474,12 +466,11 @@ protected ICUCollationKeywordFieldMapper( String simpleName, FieldType fieldType, MappedFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, Collator collator, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + super(simpleName, mappedFieldType, builderParams); assert collator.isFrozen(); this.fieldType = freezeAndDeduplicateFieldType(fieldType); this.params = builder.collatorParams(); diff --git a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java index 348e9f5fae7c..c83d8b789611 100644 --- a/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java +++ b/plugins/analysis-phonetic/src/test/java/org/elasticsearch/plugin/analysis/phonetic/AnalysisPhoneticFactoryTests.java @@ -43,7 +43,7 @@ public void testDisallowedWithSynonyms() throws IOException { Settings settings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.V_7_0_0, IndexVersion.current()) + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()) ) .put("path.home", createTempDir().toString()) .build(); diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java index 9a55bfde38b3..a3e8a3a02f93 100644 --- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureSimpleTests.java @@ -30,7 +30,7 @@ public void testOneNodeShouldRunUsingPrivateIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() @@ -52,7 +52,7 @@ public void testOneNodeShouldRunUsingPublicIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java index b8d0a1ef7bdd..6c0767026627 100644 --- 
a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java +++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/elasticsearch/discovery/azure/classic/AzureTwoStartedNodesTests.java @@ -33,7 +33,7 @@ public void testTwoNodesShouldRunUsingPrivateOrPublicIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() @@ -47,7 +47,7 @@ public void testTwoNodesShouldRunUsingPrivateOrPublicIp() { assertNotNull( client().admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .get() .getState() diff --git a/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java b/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java index 033e0e382353..0ed530c9ee3d 100644 --- a/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java +++ b/plugins/discovery-ec2/src/internalClusterTest/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryUpdateSettingsTests.java @@ -31,7 +31,7 @@ public void testMinimumMasterNodesStart() { // We try to update a setting now final String expectedValue = UUIDs.randomBase64UUID(random()); final String settingName = "cluster.routing.allocation.exclude.any_attribute"; - final ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + final ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(settingName, expectedValue)) .get(); diff --git a/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java b/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java index 32be38ac7f81..ca8a4449c4d6 100644 --- a/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java +++ b/plugins/discovery-gce/src/internalClusterTest/java/org/elasticsearch/discovery/gce/GceDiscoverTests.java @@ -67,7 +67,7 @@ public void testJoin() { ClusterStateResponse clusterStateResponse = client(masterNode).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .clear() .setNodes(true) @@ -79,7 +79,7 @@ public void testJoin() { registerGceNode(secondNode); clusterStateResponse = client(secondNode).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueSeconds(1)) .clear() .setNodes(true) @@ -88,13 +88,13 @@ public void testJoin() { assertNotNull(clusterStateResponse.getState().nodes().getMasterNodeId()); // wait for the cluster to form - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get()); + assertNoTimeout(client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(Integer.toString(2)).get()); assertNumberOfNodes(2); // add one more node and wait for it to join final String thirdNode = internalCluster().startDataOnlyNode(); registerGceNode(thirdNode); - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get()); + 
assertNoTimeout(client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes(Integer.toString(3)).get()); assertNumberOfNodes(3); } diff --git a/plugins/examples/custom-processor/build.gradle b/plugins/examples/custom-processor/build.gradle new file mode 100644 index 000000000000..69da64d8ebe8 --- /dev/null +++ b/plugins/examples/custom-processor/build.gradle @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +apply plugin: 'elasticsearch.esplugin' +apply plugin: 'elasticsearch.yaml-rest-test' + +esplugin { + name 'custom-processor' + description 'An example plugin showing how to register a custom ingest processor' + classname 'org.elasticsearch.example.customprocessor.ExampleProcessorPlugin' + licenseFile rootProject.file('SSPL-1.0+ELASTIC-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') +} + +dependencies { + yamlRestTestRuntimeOnly "org.apache.logging.log4j:log4j-core:${log4jVersion}" +} diff --git a/plugins/examples/custom-processor/src/main/java/org/elasticsearch/example/customprocessor/ExampleProcessorPlugin.java b/plugins/examples/custom-processor/src/main/java/org/elasticsearch/example/customprocessor/ExampleProcessorPlugin.java new file mode 100644 index 000000000000..1ba145a92ca7 --- /dev/null +++ b/plugins/examples/custom-processor/src/main/java/org/elasticsearch/example/customprocessor/ExampleProcessorPlugin.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.example.customprocessor; + +import org.elasticsearch.ingest.Processor; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.Plugin; + +import java.util.Map; + +public class ExampleProcessorPlugin extends Plugin implements IngestPlugin { + + @Override + public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) { + return Map.of(ExampleRepeatProcessor.TYPE, new ExampleRepeatProcessor.Factory()); + } +} diff --git a/plugins/examples/custom-processor/src/main/java/org/elasticsearch/example/customprocessor/ExampleRepeatProcessor.java b/plugins/examples/custom-processor/src/main/java/org/elasticsearch/example/customprocessor/ExampleRepeatProcessor.java new file mode 100644 index 000000000000..f0f942459281 --- /dev/null +++ b/plugins/examples/custom-processor/src/main/java/org/elasticsearch/example/customprocessor/ExampleRepeatProcessor.java @@ -0,0 +1,53 @@ +package org.elasticsearch.example.customprocessor; + +import org.elasticsearch.ingest.AbstractProcessor; +import org.elasticsearch.ingest.ConfigurationUtils; +import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; + +import java.util.Map; + +/** + * Example of adding an ingest processor with a plugin.
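A minimal sketch, not part of the patch: how the Factory defined just below turns pipeline config into a working processor. It assumes the classes added by this patch are on the classpath; the field name "message", the tag, and the sketch class itself are hypothetical.

import org.elasticsearch.ingest.IngestDocument;

import java.util.HashMap;
import java.util.Map;

class RepeatProcessorSketch {
    static IngestDocument repeatMessageField(IngestDocument doc) {
        // The config map must be mutable: ConfigurationUtils.readStringProperty removes the "field" key it consumes.
        Map<String, Object> config = new HashMap<>(Map.of("field", "message"));
        ExampleRepeatProcessor processor = new ExampleRepeatProcessor.Factory()
            .create(Map.of(), "demo-tag", "repeat the message field", config);
        // A source of {"message": "ab"} comes back as {"message": "abab"};
        // a missing field or a non-string value passes through unchanged.
        return processor.execute(doc);
    }
}

(Assumes the same package, org.elasticsearch.example.customprocessor, or an import of ExampleRepeatProcessor.)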
+ */ +public class ExampleRepeatProcessor extends AbstractProcessor { + public static final String TYPE = "repeat"; + public static final String FIELD_KEY_NAME = "field"; + + private final String field; + + ExampleRepeatProcessor(String tag, String description, String field) { + super(tag, description); + this.field = field; + } + + @Override + public IngestDocument execute(IngestDocument document) { + Object val = document.getFieldValue(field, Object.class, true); + + if (val instanceof String string) { + String repeated = string.concat(string); + document.setFieldValue(field, repeated); + } + return document; + } + + @Override + public String getType() { + return TYPE; + } + + public static class Factory implements Processor.Factory { + + @Override + public ExampleRepeatProcessor create( + Map<String, Processor.Factory> registry, + String tag, + String description, + Map<String, Object> config + ) { + String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, FIELD_KEY_NAME); + return new ExampleRepeatProcessor(tag, description, field); + } + } +} diff --git a/plugins/examples/custom-processor/src/yamlRestTest/java/org/elasticsearch/example/customprocessor/ExampleProcessorClientYamlTestSuiteIT.java b/plugins/examples/custom-processor/src/yamlRestTest/java/org/elasticsearch/example/customprocessor/ExampleProcessorClientYamlTestSuiteIT.java new file mode 100644 index 000000000000..ac08df358fe5 --- /dev/null +++ b/plugins/examples/custom-processor/src/yamlRestTest/java/org/elasticsearch/example/customprocessor/ExampleProcessorClientYamlTestSuiteIT.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.example.customprocessor; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +/** + * {@link ExampleProcessorClientYamlTestSuiteIT} executes the plugin's REST API integration tests. + *
<p>
+ * The tests can be executed using the command: ./gradlew :custom-processor:yamlRestTest + *
<p>
+ * This class extends {@link ESClientYamlSuiteTestCase}, which takes care of parsing the YAML files + * located in the src/yamlRestTest/resources/rest-api-spec/test/ directory and validates them against the + * custom REST API definition files located in src/yamlRestTest/resources/rest-api-spec/api/. + *
<p>
+ * Once validated, {@link ESClientYamlSuiteTestCase} executes the REST tests against a single node + * integration cluster which has the plugin already installed by the Gradle build script. + *
<p>
+ */ +public class ExampleProcessorClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public ExampleProcessorClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + // The test executes all the test candidates by default + // see ESClientYamlSuiteTestCase.REST_TESTS_SUITE + return ESClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/plugins/examples/custom-processor/src/yamlRestTest/resources/rest-api-spec/test/customprocessor/10_basic.yml b/plugins/examples/custom-processor/src/yamlRestTest/resources/rest-api-spec/test/customprocessor/10_basic.yml new file mode 100644 index 000000000000..40f5835fe976 --- /dev/null +++ b/plugins/examples/custom-processor/src/yamlRestTest/resources/rest-api-spec/test/customprocessor/10_basic.yml @@ -0,0 +1,15 @@ +"Custom processor is present": + - do: + ingest.put_pipeline: + id: pipeline1 + body: > + { + "processors": [ + { + "repeat" : { + "field": "test" + } + } + ] + } + - match: { acknowledged: true } diff --git a/plugins/examples/custom-processor/src/yamlRestTest/resources/rest-api-spec/test/customprocessor/20_process_document.yml b/plugins/examples/custom-processor/src/yamlRestTest/resources/rest-api-spec/test/customprocessor/20_process_document.yml new file mode 100644 index 000000000000..7e8bc2e0a2d7 --- /dev/null +++ b/plugins/examples/custom-processor/src/yamlRestTest/resources/rest-api-spec/test/customprocessor/20_process_document.yml @@ -0,0 +1,59 @@ +setup: + - do: + ingest.put_pipeline: + id: pipeline1 + body: > + { + "processors": [ + { + "repeat" : { + "field": "to_repeat" + } + } + ] + } +--- +teardown: + - do: + ingest.delete_pipeline: + id: pipeline1 + ignore: 404 + + - do: + indices.delete: + index: index1 + ignore: 404 +--- +"Process document": + # index a document with field to be processed + - do: + index: + id: doc1 + index: index1 + pipeline: pipeline1 + body: { to_repeat: "foo" } + - match: { result: "created" } + + # validate document is processed + - do: + get: + index: index1 + id: doc1 + - match: { _source: { to_repeat: "foofoo" } } +--- +"Does not process document without field": + # index a document without field to be processed + - do: + index: + id: doc1 + index: index1 + pipeline: pipeline1 + body: { field1: "foo" } + - match: { result: "created" } + + # validate document is not processed + - do: + get: + index: index1 + id: doc1 + - match: { _source: { field1: "foo" } } diff --git a/plugins/examples/custom-settings/src/test/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsConfigTests.java b/plugins/examples/custom-settings/src/test/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsConfigTests.java index f5e205500d23..23a134d58b8a 100644 --- a/plugins/examples/custom-settings/src/test/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsConfigTests.java +++ b/plugins/examples/custom-settings/src/test/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsConfigTests.java @@ -17,7 +17,7 @@ *
<p>
* It's a JUnit test class that extends {@link ESTestCase} which provides useful methods for testing. *
<p>
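As an aside on the renamed Gradle paths in the hunk below: the example plugins now build as a standalone project, so task paths drop the :example-plugins prefix. A hedged illustration of running just this test class, assuming Elasticsearch's conventional tests.class filter (the property wiring is an assumption, not something this patch changes):

./gradlew :custom-settings:test -Dtests.class="org.elasticsearch.example.customsettings.ExampleCustomSettingsConfigTests"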
- * The tests can be executed in the IDE or using the command: ./gradlew :example-plugins:custom-settings:test + * The tests can be executed in the IDE or using the command: ./gradlew :custom-settings:test */ public class ExampleCustomSettingsConfigTests extends ESTestCase { diff --git a/plugins/examples/custom-settings/src/yamlRestTest/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java b/plugins/examples/custom-settings/src/yamlRestTest/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java index 40a8af569b33..9377cc7afd47 100644 --- a/plugins/examples/custom-settings/src/yamlRestTest/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java +++ b/plugins/examples/custom-settings/src/yamlRestTest/java/org/elasticsearch/example/customsettings/ExampleCustomSettingsClientYamlTestSuiteIT.java @@ -15,7 +15,7 @@ /** * {@link ExampleCustomSettingsClientYamlTestSuiteIT} executes the plugin's REST API integration tests. *
<p>
- * The tests can be executed using the command: ./gradlew :example-plugins:custom-settings:yamlRestTest + * The tests can be executed using the command: ./gradlew :custom-settings:yamlRestTest *
<p>
* This class extends {@link ESClientYamlSuiteTestCase}, which takes care of parsing the YAML files * located in the src/yamlRestTest/resources/rest-api-spec/test/ directory and validates them against the diff --git a/plugins/examples/rest-handler/src/yamlRestTest/java/org/elasticsearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java b/plugins/examples/rest-handler/src/yamlRestTest/java/org/elasticsearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java index e6d5ac688cce..9ebfc5ebfe9d 100644 --- a/plugins/examples/rest-handler/src/yamlRestTest/java/org/elasticsearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java +++ b/plugins/examples/rest-handler/src/yamlRestTest/java/org/elasticsearch/example/resthandler/ExampleRestHandlerClientYamlTestSuiteIT.java @@ -15,7 +15,7 @@ /** * {@link ExampleRestHandlerClientYamlTestSuiteIT} executes the plugin's REST API integration tests. *
<p>
- * The tests can be executed using the command: ./gradlew :example-plugins:rest-handler:yamlRestTest + * The tests can be executed using the command: ./gradlew :rest-handler:yamlRestTest *
<p>
* This class extends {@link ESClientYamlSuiteTestCase}, which takes care of parsing the YAML files * located in the src/yamlRestTest/resources/rest-api-spec/test/ directory and validates them against the diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 8d50a9f7e29a..b66ce41d3259 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MapperBuilderContext; -import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.StringStoredFieldFieldLoader; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TextParams; @@ -46,7 +45,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -150,7 +148,6 @@ private AnnotatedTextFieldType buildFieldType(FieldType fieldType, MapperBuilder @Override public AnnotatedTextFieldMapper build(MapperBuilderContext context) { - MultiFields multiFields = multiFieldsBuilder.build(this, context); FieldType fieldType = TextParams.buildFieldType(() -> true, store, indexOptions, norms, termVectors); if (fieldType.indexOptions() == IndexOptions.NONE) { throw new IllegalArgumentException("[" + CONTENT_TYPE + "] fields must be indexed"); @@ -162,12 +159,12 @@ public AnnotatedTextFieldMapper build(MapperBuilderContext context) { ); } } + BuilderParams builderParams = builderParams(this, context); return new AnnotatedTextFieldMapper( leafName(), fieldType, - buildFieldType(fieldType, context, multiFields), - multiFields, - copyTo, + buildFieldType(fieldType, context, builderParams.multiFields()), + builderParams, this ); } @@ -523,11 +520,10 @@ protected AnnotatedTextFieldMapper( String simpleName, FieldType fieldType, AnnotatedTextFieldType mappedFieldType, - MultiFields multiFields, - CopyTo copyTo, + BuilderParams builderParams, Builder builder ) { - super(simpleName, mappedFieldType, multiFields, copyTo); + super(simpleName, mappedFieldType, builderParams); assert fieldType.tokenized(); this.fieldType = freezeAndDeduplicateFieldType(fieldType); this.builder = builder; @@ -572,39 +568,23 @@ public FieldMapper.Builder getMergeBuilder() { } @Override - protected SyntheticSourceMode syntheticSourceMode() { - return SyntheticSourceMode.NATIVE; - } - - @Override - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - if (copyTo.copyToFields().isEmpty() != true) { - throw new IllegalArgumentException( - "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to" - ); - } + protected SyntheticSourceSupport syntheticSourceSupport() { if (fieldType.stored()) { - return new StringStoredFieldFieldLoader(fullPath(), leafName()) { + var loader = new StringStoredFieldFieldLoader(fullPath(), leafName()) { @Override protected void write(XContentBuilder b, Object value) throws IOException { b.value((String) value); } }; + + return 
new SyntheticSourceSupport.Native(loader); } var kwd = TextFieldMapper.SyntheticSourceHelper.getKeywordFieldMapperForSyntheticSource(this); if (kwd != null) { - return kwd.syntheticFieldLoader(leafName()); + return new SyntheticSourceSupport.Native(kwd.syntheticFieldLoader(leafName())); } - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "field [%s] of type [%s] doesn't support synthetic source unless it is stored or has a sub-field of" - + " type [keyword] with doc values or stored and without a normalizer", - fullPath(), - typeName() - ) - ); + return super.syntheticSourceSupport(); } } diff --git a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml index 54a51e60f56d..4aac881700e1 100644 --- a/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml +++ b/plugins/mapper-annotated-text/src/yamlRestTest/resources/rest-api-spec/test/mapper_annotatedtext/20_synthetic_source.yml @@ -195,3 +195,34 @@ multiple values in stored annotated_text field with keyword multi-field: - match: hits.hits.0._source: annotated_text: ["world", "hello", "world"] + +--- +fallback synthetic source: + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + annotated_text: + type: annotated_text + store: false + + - do: + index: + index: test + id: 1 + refresh: true + body: + annotated_text: ["world", "hello", "world"] + + - do: + search: + index: test + + - match: + hits.hits.0._source: + annotated_text: ["world", "hello", "world"] + diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index 0b29bc906291..979ca842ef34 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -57,8 +57,7 @@ public Murmur3FieldMapper build(MapperBuilderContext context) { return new Murmur3FieldMapper( leafName(), new Murmur3FieldType(context.buildFullName(leafName()), stored.getValue(), meta.getValue()), - multiFieldsBuilder.build(this, context), - copyTo + builderParams(this, context) ); } } @@ -94,8 +93,8 @@ public Query termQuery(Object value, SearchExecutionContext context) { } } - protected Murmur3FieldMapper(String simpleName, MappedFieldType mappedFieldType, MultiFields multiFields, CopyTo copyTo) { - super(simpleName, mappedFieldType, multiFields, copyTo); + protected Murmur3FieldMapper(String simpleName, MappedFieldType mappedFieldType, BuilderParams builderParams) { + super(simpleName, mappedFieldType, builderParams); } @Override diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index b188f4b14859..6a4869b8c89b 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -16,7 +16,6 @@ import org.elasticsearch.index.mapper.MetadataFieldMapper; import 
org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; -import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; @@ -97,9 +96,4 @@ public void postParse(DocumentParserContext context) { public FieldMapper.Builder getMergeBuilder() { return new Builder().init(this); } - - @Override - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return SourceLoader.SyntheticFieldLoader.NOTHING; - } } diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index 081c6c26319a..39bc59012ed0 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -141,7 +141,7 @@ public void testSimpleWorkflow() { assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); assertThat(count(client, "test-idx-1"), equalTo(100L)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.getMetadata().hasIndex("test-idx-1"), equalTo(true)); assertThat(clusterState.getMetadata().hasIndex("test-idx-2"), equalTo(false)); final BlobStoreRepository repo = (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index c57112d0455c..120b6bdf3288 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -36,7 +36,8 @@ @TestCaseOrdering(FullClusterRestartTestOrdering.class) public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { - private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); + + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString(System.getProperty("tests.minimum.wire.compatible")); private static final String OLD_CLUSTER_VERSION = System.getProperty("tests.old_cluster_version"); private static IndexVersion oldIndexVersion; private static boolean upgradeFailed = false; diff --git a/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java index bd7e086d01f0..5af036a9a039 100644 --- a/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java +++ b/qa/restricted-loggers/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -29,7 +29,7 @@ public class LoggersTests extends ESTestCase { public void testClusterUpdateSettingsRequestValidationForLoggers() { assertThat(Loggers.RESTRICTED_LOGGERS, hasSize(greaterThan(0))); - ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest(); + ClusterUpdateSettingsRequest 
request = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); for (String logger : Loggers.RESTRICTED_LOGGERS) { var validation = request.persistentSettings(Map.of("logger." + logger, org.elasticsearch.logging.Level.DEBUG)).validate(); assertNotNull(validation); diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index fe2236adc490..b0025302701a 100644 --- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -419,7 +419,7 @@ public void testRecoveryClosedIndex() throws Exception { } final IndexVersion indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(IndexVersions.V_7_2_0)) { + if (indexVersionCreated.onOrAfter(IndexVersions.V_8_0_0)) { // index was created on a version that supports the replication of closed indices, // so we expect the index to be closed and replicated ensureGreen(indexName); @@ -448,7 +448,7 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)) { + if (minimumIndexVersion().onOrAfter(IndexVersions.V_8_0_0)) { // index is created on a version that supports the replication of closed indices, // so we expect the index to be closed and replicated ensureGreen(indexName); @@ -483,9 +483,9 @@ public void testClosedIndexNoopRecovery() throws Exception { closeIndex(indexName); } - if (indexVersionCreated(indexName).onOrAfter(IndexVersions.V_7_2_0)) { + if (indexVersionCreated(indexName).onOrAfter(IndexVersions.V_8_0_0)) { // index was created on a version that supports the replication of closed indices, so we expect it to be closed and replicated - assertTrue(minimumIndexVersion().onOrAfter(IndexVersions.V_7_2_0)); + assertTrue(minimumIndexVersion().onOrAfter(IndexVersions.V_8_0_0)); ensureGreen(indexName); assertClosedIndex(indexName, true); if (CLUSTER_TYPE != ClusterType.OLD) { diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 1a7701991428..35ec9979c325 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -217,7 +217,9 @@ setup: "Test ingest simulate with reroute and mapping validation from templates": - skip: - features: headers + features: + - headers + - allowed_warnings - requires: cluster_features: ["simulate.mapping.validation.templates"] @@ -241,6 +243,8 @@ setup: - match: { acknowledged: true } - do: + allowed_warnings: + - "index template [first-index-template] has index patterns [first-index*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [first-index-template] will take precedence during new index creation" indices.put_index_template: name: first-index-template body: @@ -255,6 +259,8 @@ setup: type: text - do: + allowed_warnings: + - "index template [second-index-template] has index patterns [second-index*] matching patterns from existing older templates [global] with patterns (global => 
[*]); this template [second-index-template] will take precedence during new index creation" indices.put_index_template: name: second-index-template body: diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 089b7470e9a9..015c9c4b812c 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -41,194 +41,6 @@ dependencies { clusterModules project(':modules:data-streams') } -tasks.named("yamlRestTestV7CompatTransform").configure { task -> - - task.skipTestsByFilePattern("**/cat*/*.yml", "Cat API are meant to be consumed by humans, so will not be supported by Compatible REST API") - task.skipTestsByFilePattern("**/indices.upgrade/*.yml", "upgrade api will only get a dummy endpoint returning an exception suggesting to use _reindex") - task.skipTestsByFilePattern("**/indices.stats/60_field_usage/*/*.yml", "field usage results will be different between lucene versions") - task.skipTestsByFilePattern("**/search.aggregation/*.yml", "run by the aggregation module") - - task.skipTest("bulk/11_dynamic_templates/Dynamic templates", "Error message has changed") - task.skipTest("index/80_date_nanos/date_nanos requires dates after 1970 and before 2262", "Error message has changed") - task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typed index while there is a typeless template", "Type information about the type is removed and not passed down. The logic to check for this is also removed.") - task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typeless index while there is a typed template", "Type information about the type is removed and not passed down. The logic to check for this is also removed.") - task.skipTest("delete/70_mix_typeless_typeful/DELETE with typeless API on an index that has types", "Type information about the type is removed and not passed down. The logic to check for this is also removed."); - task.skipTest("get/100_mix_typeless_typeful/GET with typeless API on an index that has types", "Failing due to not recognising missing type (the type path param is ignored, will no be fixed"); - task.skipTest("indices.get_field_mapping/21_missing_field_with_types/Return empty object if field doesn't exist, but type and index do", "This test returns test_index.mappings:{} when {} was expected. difference between 20_missing_field and 21_missing_field_with_types?") - task.skipTest("indices.get_field_mapping/30_missing_type/Raise 404 when type doesn't exist", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/Existent and non-existent type returns 404 and the existing type", " The information about the type is not present in the index. hence it cannot know if the type exist or not") - task.skipTest("indices.get_mapping/20_missing_type/Existent and non-existent types returns 404 and the existing type", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/No type matching pattern returns 404", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/Non-existent type returns 404", "The information about the type is not present in the index. 
hence it cannot know if the type exist or not.") - task.skipTest("indices.get_mapping/20_missing_type/Type missing when no types exist", "The information about the type is not present in the index. hence it cannot know if the type exist or not.") - task.skipTest("indices.put_mapping/20_mix_typeless_typeful/PUT mapping with _doc on an index that has types", "The information about the type is not present in the index. hence it cannot know if the type was already used or not") - task.skipTest("indices.put_mapping/20_mix_typeless_typeful/PUT mapping with typeless API on an index that has types", "The information about the type is not present in the index. hence it cannot know if the type was already used or not") - task.skipTest("search/160_exists_query/Test exists query on _type field", "There is a small distinction between empty mappings and no mappings at all. The code to implement this test was refactored #54003; field search on _type field- not implementing. The data for _type is considered incorrect in this search") - task.skipTest("termvectors/50_mix_typeless_typeful/Term vectors with typeless API on an index that has types", "type information is not stored, hence the the index will be found") - task.skipTest("mget/11_default_index_type/Default index/type", "mget - these use cases are no longer valid because we always default to _doc.; This mean test cases where there is assertion on not finding by type won't work") - task.skipTest("mget/16_basic_with_types/Basic multi-get", "mget - these use cases are no longer valid, because we always default to _doc.; This mean test cases where there is assertion on not finding by type won't work") - task.skipTest("explain/40_mix_typeless_typeful/Explain with typeless API on an index that has types", "asserting about type not found won't work as we ignore the type information") - task.skipTest("indices.stats/20_translog/Translog retention settings are deprecated", "translog settings removal is not supported under compatible api") - task.skipTest("indices.stats/20_translog/Translog retention without soft_deletes", "translog settings removal is not supported under compatible api") - task.skipTest("indices.stats/20_translog/Translog stats on closed indices without soft-deletes", "translog settings removal is not supported under compatible api") - task.skipTest("indices.create/10_basic/Create index without soft deletes", "Make soft-deletes mandatory in 8.0 #51122 - settings changes are note supported in Rest Api compatibility") - task.skipTest("field_caps/30_filter/Field caps with index filter", "behaviour change after #63692 4digits dates are parsed as epoch and in quotes as year") - task.skipTest("indices.forcemerge/10_basic/Check deprecation warning when incompatible only_expunge_deletes and max_num_segments values are both set", "#44761 bug fix") - task.skipTest("search/340_type_query/type query", "#47207 type query throws exception in compatible mode") - task.skipTest("search/310_match_bool_prefix/multi_match multiple fields with cutoff_frequency throws exception", "#42654 cutoff_frequency, common terms are not supported. 
Throwing an exception") - task.skipTest("search_shards/10_basic/Search shards aliases with and without filters", "Filter representation no longer outputs default boosts") - task.skipTest("migration/10_get_feature_upgrade_status/Get feature upgrade status", "Awaits backport") - task.skipTest("search/330_fetch_fields/Test disable source", "Error no longer thrown") - task.skipTest("search/370_profile/fetch fields", "profile output has changed") - task.skipTest("search/370_profile/fetch source", "profile output has changed") - task.skipTest("search/370_profile/fetch nested source", "profile output has changed") - task.skipTest("search/240_date_nanos/doc value fields are working as expected across date and date_nanos fields", "Fetching docvalues field multiple times is no longer allowed") - task.skipTest("search/110_field_collapsing/field collapsing and rescore", "#107779 Field collapsing is compatible with rescore in 8.15") - - task.replaceValueInMatch("_type", "_doc") - task.addAllowedWarningRegex("\\[types removal\\].*") - task.replaceValueInMatch("nodes.\$node_id.roles.8", "ml", "node_info role test") - task.replaceValueInMatch("nodes.\$node_id.roles.9", "remote_cluster_client", "node_info role test") - task.removeMatch("nodes.\$node_id.roles.10", "node_info role test") - task.replaceIsTrue("test_index.mappings.type_1", "test_index.mappings._doc") - //override for indices.get and indices.create - //task.replaceIsFalse("test_index.mappings.type_1", "test_index.mappings._doc") - //overrides for indices.create/20_mix_typeless_typeful - task.replaceIsFalse("test-1.mappings._doc","false", "Create a typed index while there is a typeless template") - task.replaceIsFalse("test-1.mappings._doc","false", "Create a typeless index while there is a typed template") - - task.replaceIsTrue("test-1.mappings.my_type", "test-1.mappings._doc") - task.replaceIsTrue("test-1.mappings.my_type.properties.foo", "test-1.mappings._doc.properties.foo") - task.replaceIsTrue("test-1.mappings.my_type.properties.bar", "test-1.mappings._doc.properties.bar") - - // overrides for indices.get_field_mapping - task.replaceKeyInLength("test_index.mappings.test_type.text.mapping.text.type", - "test_index.mappings._doc.text.mapping.text.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.text.mapping.text.analyzer", - "test_index.mappings._doc.text.mapping.text.analyzer" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.t1.full_name", - "test_index.mappings._doc.t1.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.t2.full_name", - "test_index.mappings._doc.t2.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.obj\\.t1.full_name", - "test_index.mappings._doc.obj\\.t1.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.obj\\.i_t1.full_name", - "test_index.mappings._doc.obj\\.i_t1.full_name" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.obj\\.i_t3.full_name", - "test_index.mappings._doc.obj\\.i_t3.full_name" - ) - task.replaceKeyInLength("test_index.mappings.test_type", - "test_index.mappings._doc" - ) - task.replaceKeyInMatch("test_index_2.mappings.test_type_2.t1.full_name", - "test_index.mappings._doc.t1.full_name" - ) - task.replaceKeyInMatch("test_index_2.mappings.test_type_2.t2.full_name", - "test_index.mappings._doc.t2.full_name" - ) - task.replaceKeyInLength("test_index_2.mappings.test_type_2", - "test_index.mappings._doc" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.text.mapping.text.type", - 
"test_index.mappings._doc.text.mapping.text.type" - ) - // overrides for indices.put_mapping/11_basic_with_types - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text1.type", - "test_index.mappings._doc.properties.text1.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text1.analyzer", - "test_index.mappings._doc.properties.text1.analyzer" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text2.type", - "test_index.mappings._doc.properties.text2.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text2.analyzer", - "test_index.mappings._doc.properties.text2.analyzer" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.subfield.properties.text3.type", - "test_index.mappings._doc.properties.subfield.properties.text3.type" - ) - task.replaceKeyInMatch("test_index.mappings.test_type.properties.text1.fields.text_raw.type", - "test_index.mappings._doc.properties.text1.fields.text_raw.type" - ) - // overrides for indices.put_mapping/all_path_options_with_types - task.replaceKeyInMatch("test_index1.mappings.test_type.properties.text.type", - "test_index1.mappings._doc.properties.text.type" - ) - task.replaceKeyInMatch("test_index1.mappings.test_type.properties.text.analyzer", - "test_index1.mappings._doc.properties.text.analyzer" - ) - task.replaceKeyInMatch("test_index2.mappings.test_type.properties.text.type", - "test_index2.mappings._doc.properties.text.type" - ) - task.replaceKeyInMatch("test_index2.mappings.test_type.properties.text.analyzer", - "test_index2.mappings._doc.properties.text.analyzer" - ) - task.replaceKeyInMatch("foo.mappings.test_type.properties.text.type", - "foo.mappings._doc.properties.text.type" - ) - task.replaceKeyInMatch("foo.mappings.test_type.properties.text.analyzer", - "foo.mappings._doc.properties.text.analyzer" - ) - // overrides for indices.get_mapping - task.replaceIsTrue("test_1.mappings.doc", "test_1.mappings._doc") - task.replaceIsTrue("test_2.mappings.doc", "test_2.mappings._doc") - // overrides for mget - task.replaceValueInMatch("docs.0._type", "_doc" , "Basic multi-get") // index found, but no doc - task.replaceValueInMatch("docs.0._type", "_doc", "Default index/type") - task.replaceValueInMatch("docs.0._type", "_doc", "Non-existent index") - task.replaceValueInMatch("docs.0._type", "_doc", "Missing metadata") - task.replaceValueInMatch("docs.0._type", "_doc", "Multi Get with alias that resolves to multiple indices") - task.replaceValueInMatch("docs.1._type", "_doc", "Multi Get with alias that resolves to multiple indices") - task.replaceValueInMatch("docs.2._type", "_doc", "Multi Get with alias that resolves to multiple indices") - task.replaceValueInMatch("docs.0._type", "_doc", "IDs") - task.replaceValueInMatch("docs.1._type", "_doc", "IDs") - task.replaceValueInMatch("docs.2._type", "_doc", "Routing") - - //overrides for indices.stats - //TODO fix to remove the below match - task.replaceKeyInMatch("_all.primaries.indexing.types.baz.index_total", - "_all.primaries.indexing.types._doc.index_total" - ) - task.replaceKeyInMatch("_all.primaries.indexing.types.bar.index_total", - "_all.primaries.indexing.types._doc.index_total" - ) - task.replaceValueInMatch("_all.primaries.indexing.types._doc.index_total", 2) - // points get touched by sorting in ES 8 - task.replaceValueInMatch("testindex.shards.0.stats.fields.price.points", 1) - - //override for "indices.open/10_basic/?wait_for_active_shards default is deprecated" and 
"indices.open/10_basic/?wait_for_active_shards=index-setting" - task.addAllowedWarningRegexForTest("\\?wait_for_active_shards=index-setting is now the default behaviour.*", "?wait_for_active_shards=index-setting") - task.removeWarningForTest("the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; " + - "specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" - , "?wait_for_active_shards default is deprecated") - - // override for exception message change in #55291 tests cluster.voting_config_exclusions/10_basic/ - // 'Throw exception when adding voting config exclusion and specifying both node_ids and node_names', - // 'Throw exception when adding voting config exclusion without specifying nodes', - task.replaceValueTextByKeyValue("catch", - '/Please set node identifiers correctly. One and only one of \\[node_name\\], \\[node_names\\] and \\[node_ids\\] has to be set/', - '/You must set \\[node_names\\] or \\[node_ids\\] but not both/') - - // sync_id is no longer available in SegmentInfos.userData // "indices.flush/10_basic/Index synced flush rest test" - task.replaceIsTrue("indices.testing.shards.0.0.commit.user_data.sync_id", "indices.testing.shards.0.0.commit.user_data") - - // we can now search using doc values only - task.replaceValueInMatch("fields.object\\.nested1.long.searchable", true) - - //client.type no longer exists #101214 - task.replaceKeyInMatch("nodes.\$node_id.settings.client.type", "nodes.\$node_id.settings.node.attr.testattr") - task.replaceValueInMatch("nodes.\$node_id.settings.node.attr.testattr", "test") - task.replaceKeyInMatch("nodes.\$node_id.settings.client\\.type", "nodes.\$node_id.settings.node\\.attr\\.testattr") - task.replaceValueInMatch("nodes.\$node_id.settings.node\\.attr\\.testattr", "test") -} - tasks.register('enforceYamlTestConvention').configure { def tree = fileTree('src/main/resources/rest-api-spec/test') doLast { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json index 07f9e3774027..5447ea1e5a4e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_geoip_database.json @@ -7,7 +7,8 @@ "stability":"stable", "visibility":"public", "headers":{ - "accept": [ "application/json"] + "accept": [ "application/json"], + "content_type": ["application/json"] }, "url":{ "paths":[ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.repository_verify_integrity.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.repository_verify_integrity.json new file mode 100644 index 000000000000..bab8101b7455 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.repository_verify_integrity.json @@ -0,0 +1,65 @@ +{ + "snapshot.repository_verify_integrity":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "description":"Verifies the integrity of the contents of a snapshot repository" + }, + "stability":"experimental", + "visibility":"private", + "headers": { + "accept": [ + "application/json" + ] + }, + "url":{ + "paths":[ + { + "path":"/_snapshot/{repository}/_verify_integrity", + "methods":[ + "POST" + ], + "parts":{ + "repository":{ + "type":"string", + 
"description":"A repository name" + } + } + } + ] + }, + "params":{ + "meta_thread_pool_concurrency":{ + "type":"number", + "description":"Number of threads to use for reading metadata" + }, + "blob_thread_pool_concurrency":{ + "type":"number", + "description":"Number of threads to use for reading blob contents" + }, + "snapshot_verification_concurrency":{ + "type":"number", + "description":"Number of snapshots to verify concurrently" + }, + "index_verification_concurrency":{ + "type":"number", + "description":"Number of indices to verify concurrently" + }, + "index_snapshot_verification_concurrency":{ + "type":"number", + "description":"Number of snapshots to verify concurrently within each index" + }, + "max_failed_shard_snapshots":{ + "type":"number", + "description":"Maximum permitted number of failed shard snapshots" + }, + "verify_blob_contents":{ + "type":"boolean", + "description":"Whether to verify the contents of individual blobs" + }, + "max_bytes_per_sec":{ + "type":"string", + "description":"Rate limit for individual blob verification" + } + } + } +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml index cf43797a451e..06139542c5e5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.stats/10_basic.yml @@ -258,8 +258,8 @@ --- "Dense vector stats": - requires: - cluster_features: [ "gte_v8.15.0" ] - reason: "dense vector stats reports from primary indices in 8.15" + cluster_features: [ "gte_v8.16.0" ] + reason: "dense vector stats reports from primary indices in 8.15 and fixed in 8.16" - do: indices.create: index: test1 @@ -329,9 +329,17 @@ - do: indices.refresh: { } + - do: + index: + index: test2 + id: "3" + refresh: true + body: + not_vector_field: "not vector" + - do: { cluster.stats: { } } - - match: { indices.docs.count: 4 } + - match: { indices.docs.count: 5 } - match: { indices.docs.deleted: 0 } - match: { indices.dense_vector.value_count: 8 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index 55605849de69..f1e296ed8e30 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -129,50 +129,6 @@ force_synthetic_source_ok: kwd: foo - is_false: fields ---- -force_synthetic_source_bad_mapping: - - requires: - cluster_features: ["gte_v8.4.0"] - reason: introduced in 8.4.0 - - - do: - indices.create: - index: test - body: - settings: - number_of_shards: 1 # Use a single shard to get consistent error messages - mappings: - _source: - mode: stored - properties: - text: - type: text - - - do: - index: - index: test - id: 1 - refresh: true - body: - text: foo - - # When _source is used in the fetch the original _source is perfect - - do: - get: - index: test - id: 1 - - match: - _source: - text: foo - - # Forcing synthetic source fails because the mapping is invalid - - do: - catch: bad_request - get: - index: test - id: 1 - force_synthetic_source: true - --- stored text: - requires: @@ -1040,25 +996,6 @@ flattened field: - is_false: fields ---- -flattened field no doc values: - - requires: - cluster_features: ["gte_v8.8.0"] - reason: 
support for synthetic source on flattened fields added in 8.8.0 - - - do: - catch: /field \[flattened\] of type \[flattened\] doesn't support synthetic source because it doesn't have doc values/ - indices.create: - index: test - body: - mappings: - _source: - mode: synthetic - properties: - flattened: - type: flattened - doc_values: false - --- flattened field with ignore_above: - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 265aec75dc9c..9dd6cec6e657 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -1,23 +1,3 @@ -invalid: - - requires: - cluster_features: ["gte_v8.3.0"] - reason: introduced in 8.3.0 - - - do: - catch: bad_request - indices.create: - index: test - body: - mappings: - _source: - mode: synthetic - properties: - kwd: - type: boolean - doc_values: false - - ---- object with unmapped fields: - requires: cluster_features: ["mapper.track_ignored_source"] @@ -990,3 +970,247 @@ subobjects auto: - match: { hits.hits.3._source.id: 4 } - match: { hits.hits.3._source.auto_obj.foo: 40 } - match: { hits.hits.3._source.auto_obj.foo\.bar: 400 } + +--- +synthetic_source with copy_to: + - requires: + cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] + reason: requires copy_to support in synthetic source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + number: + type: integer + copy_to: number_copy + number_copy: + type: keyword + boolean: + type: boolean + copy_to: boolean_copy + boolean_copy: + type: keyword + keyword: + type: keyword + copy_to: keyword_copy + keyword_copy: + type: keyword + date: + type: date + copy_to: date_copy + date_copy: + type: keyword + text: + type: text + copy_to: text_copy + text_copy: + type: keyword + ip: + type: ip + copy_to: ip_copy + ip_copy: + type: keyword + ip_range: + type: ip_range + copy_to: ip_range_copy + ip_range_copy: + type: keyword + geo_point: + type: geo_point + copy_to: geo_point_copy + geo_point_copy: + type: keyword + binary: + type: binary + copy_to: binary_copy + binary_copy: + type: keyword + scaled_float: + type: scaled_float + scaling_factor: 10 + copy_to: scaled_float_copy + scaled_float_copy: + type: keyword + + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - >- + { + "number": 100, + "boolean": false, + "keyword": "hello_keyword", + "date": "2015-01-01T12:10:30Z", + "text": "hello_text", + "match_only_text": "hello_match_only_text", + "ip": "192.168.1.1", + "ip_range": "10.0.0.0/24", + "geo_point": "POINT (-71.34 41.12)", + "binary": "aGVsbG8gY3VyaW91cyBwZXJzb24=", + "scaled_float": 1.5 + } + + - match: { errors: false } + + - do: + search: + index: test + body: + fields: ["number_copy", "boolean_copy", "keyword_copy", "date_copy", "text_copy", "ip_copy", "ip_range_copy", "geo_point_copy", "binary_copy", "scaled_float_copy"] + + - match: { hits.hits.0._source.number: 100 } + - match: { hits.hits.0.fields.number_copy.0: "100" } + + - match: { hits.hits.0._source.boolean: false } + - match: { hits.hits.0.fields.boolean_copy.0: "false" } + + - match: { hits.hits.0._source.keyword: "hello_keyword" } + - match: { hits.hits.0.fields.keyword_copy.0: "hello_keyword" 
} + + - match: { hits.hits.0._source.date: "2015-01-01T12:10:30Z" } + - match: { hits.hits.0.fields.date_copy.0: "2015-01-01T12:10:30Z" } + + - match: { hits.hits.0._source.text: "hello_text" } + - match: { hits.hits.0.fields.text_copy.0: "hello_text" } + + - match: { hits.hits.0._source.ip: "192.168.1.1" } + - match: { hits.hits.0.fields.ip_copy.0: "192.168.1.1" } + + - match: { hits.hits.0._source.ip_range: "10.0.0.0/24" } + - match: { hits.hits.0.fields.ip_range_copy.0: "10.0.0.0/24" } + + - match: { hits.hits.0._source.geo_point: "POINT (-71.34 41.12)" } + - match: { hits.hits.0.fields.geo_point_copy.0: "POINT (-71.34 41.12)" } + + - match: { hits.hits.0._source.binary: "aGVsbG8gY3VyaW91cyBwZXJzb24=" } + - match: { hits.hits.0.fields.binary_copy.0: "aGVsbG8gY3VyaW91cyBwZXJzb24=" } + + - match: { hits.hits.0._source.scaled_float: 1.5 } + - match: { hits.hits.0.fields.scaled_float_copy.0: "1.5" } + +--- +synthetic_source with disabled doc_values: + - requires: + cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] + reason: requires disabled doc_values support in synthetic source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + number: + type: integer + doc_values: false + boolean: + type: boolean + doc_values: false + keyword: + type: keyword + doc_values: false + date: + type: date + doc_values: false + ip: + type: ip + doc_values: false + ip_range: + type: ip_range + doc_values: false + flattened: + type: flattened + doc_values: false + geo_point: + type: geo_point + doc_values: false + binary: + type: binary + doc_values: false + scaled_float: + type: scaled_float + scaling_factor: 10 + doc_values: false + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - >- + { + "number": 100, + "boolean": false, + "keyword": "hello_keyword", + "date": "2015-01-01T12:10:30Z", + "ip": "192.168.1.1", + "ip_range": "10.0.0.0/24", + "flattened": { "f": "hey" }, + "geo_point": "POINT (-71.34 41.12)", + "binary": "aGVsbG8gY3VyaW91cyBwZXJzb24=", + "scaled_float": 1.5 + } + + - match: { errors: false } + + - do: + search: + index: test + + - match: { hits.hits.0._source.number: 100 } + - match: { hits.hits.0._source.boolean: false } + - match: { hits.hits.0._source.keyword: "hello_keyword" } + - match: { hits.hits.0._source.date: "2015-01-01T12:10:30Z" } + - match: { hits.hits.0._source.ip: "192.168.1.1" } + - match: { hits.hits.0._source.ip_range: "10.0.0.0/24" } + - match: { hits.hits.0._source.flattened.f: "hey" } + - match: { hits.hits.0._source.geo_point: "POINT (-71.34 41.12)" } + - match: { hits.hits.0._source.binary: "aGVsbG8gY3VyaW91cyBwZXJzb24=" } + - match: { hits.hits.0._source.scaled_float: 1.5 } + +--- +fallback synthetic_source for text field: + - requires: + cluster_features: ["mapper.source.synthetic_source_with_copy_to_and_doc_values_false"] + reason: requires disabled doc_values support in synthetic source + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + properties: + text: + type: text + store: false + + - do: + index: + index: test + id: 1 + refresh: true + body: + text: [ "world", "hello", "world" ] + + - do: + search: + index: test + + - match: + hits.hits.0._source: + text: [ "world", "hello", "world" ] + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml index 
e46f67326a8d..371a9961122a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml @@ -1,85 +1,85 @@ --- setup: - do: - indices.create: - index: test_1 - body: - mappings: {} + indices.create: + index: test_1 + body: + mappings: {} - do: - indices.create: - index: test_2 - body: - mappings: {} + indices.create: + index: test_2 + body: + mappings: {} --- "Get /{index}/_mapping with empty mappings": - - do: - indices.create: - index: t + - do: + indices.create: + index: t - - do: - indices.get_mapping: - index: t + - do: + indices.get_mapping: + index: t - - match: { t.mappings: {}} + - match: { t.mappings: {}} --- "Get /_mapping": - - do: - indices.get_mapping: {} + - do: + indices.get_mapping: {} - - is_true: test_1.mappings - - is_true: test_2.mappings + - is_true: test_1.mappings + - is_true: test_2.mappings --- "Get /{index}/_mapping": - - do: - indices.get_mapping: - index: test_1 + - do: + indices.get_mapping: + index: test_1 - - is_true: test_1.mappings - - is_false: test_2 + - is_true: test_1.mappings + - is_false: test_2 --- "Get /_all/_mapping": - - do: - indices.get_mapping: - index: _all + - do: + indices.get_mapping: + index: _all - - is_true: test_1.mappings - - is_true: test_2.mappings + - is_true: test_1.mappings + - is_true: test_2.mappings --- "Get /*/_mapping": - - do: - indices.get_mapping: - index: '*' + - do: + indices.get_mapping: + index: '*' - - is_true: test_1.mappings - - is_true: test_2.mappings + - is_true: test_1.mappings + - is_true: test_2.mappings --- "Get /index,index/_mapping": - - do: - indices.get_mapping: - index: test_1,test_2 + - do: + indices.get_mapping: + index: test_1,test_2 - - is_true: test_1.mappings - - is_true: test_2.mappings + - is_true: test_1.mappings + - is_true: test_2.mappings --- "Get /index*/_mapping/": - - do: - indices.get_mapping: - index: '*2' + - do: + indices.get_mapping: + index: '*2' - - is_true: test_2.mappings - - is_false: test_1 + - is_true: test_2.mappings + - is_false: test_1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml index ff17a92ed0fc..2f3d2fa2f974 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/90_synthetic_source.yml @@ -247,58 +247,3 @@ force_synthetic_source_ok: obj: kwd: bar ---- -force_synthetic_source_bad_mapping: - - requires: - cluster_features: ["gte_v8.5.0"] - reason: message changed in 8.5 - - - do: - indices.create: - index: test - body: - settings: - number_of_shards: 1 # Use a single shard to get consistent error messages - mappings: - _source: - mode: stored - properties: - text: - type: text - - - do: - index: - index: test - id: 1 - body: - text: foo - - - do: - index: - index: test - id: 2 - body: - text: bar - - # When _source is used in the fetch the original _source is perfect - - do: - mget: - index: test - body: - ids: [ 1, 2 ] - - match: - docs.0._source: - text: foo - - match: - docs.1._source: - text: bar - - # Forcing synthetic source fails because the mapping is invalid - - do: - mget: - index: test - force_synthetic_source: true - body: - ids: [ 1, 2 ] - - match: {docs.0.error.reason: "field [text] of type [text] doesn't support synthetic source unless it is stored or has 
a sub-field of type [keyword] with doc values or stored and without a normalizer"} - - match: {docs.1.error.reason: "field [text] of type [text] doesn't support synthetic source unless it is stored or has a sub-field of type [keyword] with doc values or stored and without a normalizer"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml index 75d488427a90..0cc1796bb47d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml @@ -208,56 +208,6 @@ force_synthetic_source_ok: obj: kwd: foo ---- -force_synthetic_source_bad_mapping: - - requires: - cluster_features: ["gte_v8.4.0"] - reason: introduced in 8.4.0 - - - do: - indices.create: - index: test - body: - settings: - number_of_shards: 1 # Use a single shard to get consistent error messages - mappings: - _source: - mode: stored - properties: - text: - type: text - - - do: - index: - index: test - id: 1 - refresh: true - body: - text: foo - - # When _source is used in the fetch the original _source is perfect - - do: - search: - index: test - body: - query: - ids: - values: [1] - - match: - hits.hits.0._source: - text: foo - - # Forcing synthetic source fails because the mapping is invalid - - do: - catch: bad_request - search: - index: test - force_synthetic_source: true - body: - query: - ids: - values: [1] - --- doc values keyword with ignore_above: - requires: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml index a32969b0b69b..da0f00d96053 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/simulate.ingest/10_basic.yml @@ -261,7 +261,9 @@ setup: "Test mapping validation from templates": - skip: - features: headers + features: + - headers + - allowed_warnings - requires: cluster_features: ["simulate.mapping.validation.templates"] @@ -279,6 +281,8 @@ setup: type: text - do: + allowed_warnings: + - "index template [v2_template] has index patterns [v2_strict_nonexistent*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [v2_template] will take precedence during new index creation" indices.put_index_template: name: v2_template body: @@ -291,6 +295,8 @@ setup: type: text - do: + allowed_warnings: + - "index template [v2_hidden_template] has index patterns [v2_strict_hidden_nonexistent*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [v2_hidden_template] will take precedence during new index creation" indices.put_index_template: name: v2_hidden_template body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index 976ac8f08f79..db718959919d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -238,52 +238,3 @@ aggregate on _id: id: terms: field: _id - ---- -synthetic source text field: 
- - requires: - cluster_features: ["gte_v8.7.0"] - reason: "synthetic source introduced in 8.7.0" - - - do: - catch: /field \[k8s.pod.agent.name\] of type \[text\] doesn't support synthetic source unless it is stored or has a sub-field of type \[keyword\] with doc values or stored and without a normalizer/ - indices.create: - index: test-text-field - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - index: - mode: time_series - routing_path: [ metricset, k8s.pod.uid ] - time_series: - start_time: 2021-04-28T00:00:00Z - end_time: 2021-04-29T00:00:00Z - mappings: - properties: - "@timestamp": - type: date - metricset: - type: keyword - time_series_dimension: true - k8s: - properties: - pod: - properties: - uid: - type: keyword - time_series_dimension: true - agent: - type: object - properties: - id: - type: text - fields: - raw: - type: keyword - name: - type: text - store: false - value: - type: long - time_series_metric: gauge diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json deleted file mode 100644 index 2cdc2f3bc9ae..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/cluster.post_voting_config_exclusions_with_node_name_part.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "cluster.post_voting_config_exclusions_with_node_name_part":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/voting-config-exclusions.html", - "description":"Updates the cluster voting config exclusions by node_name (not node ids or node names)." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_cluster/voting_config_exclusions/{node_name}", - "methods":[ - "POST" - ], - "parts":{ - "node_name":{ - "type":"string", - "description":"A comma-separated list of node descriptors of the nodes to exclude from the voting configuration." - } - }, - "deprecated":{ - "version":"7.8.0", - "description":"node_name is deprecated, use node_names or node_ids instead" - } - } - ] - } - } -} diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/indices.put_template_with_param.json b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/indices.put_template_with_param.json deleted file mode 100644 index 7ee6cbd39ebf..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/api/indices.put_template_with_param.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "indices.put_template_with_param":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", - "description":"Creates or updates an index template." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], - "content_type": ["application/vnd.elasticsearch+json;compatible-with=7"] - }, - "url":{ - "paths":[ - { - "path":"/_template/{name}", - "methods":[ - "PUT", - "POST" - ], - "parts":{ - "name":{ - "type":"string", - "description":"The name of the template" - } - } - } - ] - }, - "params":{ - "template":{ - "type":"string", - "description":"The indices that this template should apply to, replaced by index_patterns within the template definition." 
- }, - "order":{ - "type":"number", - "description":"The order for this template when merging multiple matching ones (higher numbers are merged later, overriding the lower numbers)" - }, - "create":{ - "type":"boolean", - "description":"Whether the index template should only be added if new or can also replace an existing one", - "default":false - }, - "master_timeout":{ - "type":"time", - "description":"Specify timeout for connection to master" - } - }, - "body":{ - "description":"The template definition", - "required":true - } - } -} diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic_compat.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic_compat.yml deleted file mode 100644 index 8806918703ab..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/cluster.voting_config_exclusions/10_basic_compat.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings_regex" - ---- -"Throw exception when adding voting config exclusion by specifying a 'node_name'": - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - cluster.post_voting_config_exclusions_with_node_name_part: - node_name: someNodeName - warnings_regex: - - ".* /_cluster/voting_config_exclusions/\\{node_name\\} has been removed. .*" - catch: /\[node_name\] has been removed, you must set \[node_names\] or \[node_ids\]/ diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.deprecated.upgrade/10_basic_upgrade.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.deprecated.upgrade/10_basic_upgrade.yml deleted file mode 100644 index b368975fa5e5..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.deprecated.upgrade/10_basic_upgrade.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "allowed_warnings_regex" - ---- -Basic test for upgrade indices: - - requires: - cluster_features: ["gte_v7.11.0"] - reason: "_upgrade api is deprecated since 7.11.0" - test_runner_features: - - "warnings" - - do: - indices.create: - index: "test_index" - body: - settings: - index: - number_of_replicas: 0 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - do: - catch: "bad_request" - indices.upgrade: - index: "test_index" - warnings: - - "The _upgrade API is no longer useful and will be removed. Instead, see _reindex\ - \ API." 
- headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - match: - status: 400 - - match: - error.reason: "/Upgrade.action.(GET|POST).(_upgrade|/test_index/_upgrade).was.removed,.use._reindex.API.instead/" diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.put_template/10_basic_compat.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.put_template/10_basic_compat.yml deleted file mode 100644 index 043e525a8e9b..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/indices.put_template/10_basic_compat.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings" - ---- -"Put template": - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings: - - "Deprecated field [template] used, replaced by [index_patterns]" - indices.put_template: - name: test - body: - template: test-* - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - properties: - field: - type: keyword - - - do: - indices.get_template: - name: test - flat_settings: true - - - match: {test.index_patterns: ["test-*"]} - - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}} - - match: {test.mappings: {properties: {field: {type: keyword}}}} - ---- -"Put template (with template parameter)": - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings: - - "Deprecated parameter [template] used, replaced by [index_patterns]" - indices.put_template_with_param: - name: test - template: "test-*" - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - properties: - field: - type: keyword - - - do: - indices.get_template: - name: test - flat_settings: true - - - match: {test.index_patterns: ["test-*"]} - - match: {test.settings: {index.number_of_shards: '1', index.number_of_replicas: '0'}} - - match: {test.mappings: {properties: {field: {type: keyword}}}} diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/nodes.hot_threads/10_basic_compat.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/nodes.hot_threads/10_basic_compat.yml deleted file mode 100644 index c64e80d0f6a0..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/nodes.hot_threads/10_basic_compat.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "allowed_warnings_regex" - ---- -"Get hot threads": - - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - nodes.hot_threads: {} - allowed_warnings_regex: - - ".*hot_?threads.* is a deprecated endpoint.*" - - match: - $body: /:::/ diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml deleted file mode 100644 index c3b3c4320be9..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.aggregation/10_moving_avg.yml 
+++ /dev/null @@ -1,28 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - ---- -moving_avg agg throws exception: - - do: - catch: "/Moving Average aggregation usage is not supported. Use the \\[moving_fn\\] aggregation instead./" - search: - rest_total_hits_as_int: true - body: - aggs: - the_histo: - date_histogram: - field: "date" - calendar_interval: "1d" - aggs: - the_avg: - avg: - field: "value_field" - the_movavg: - moving_avg: - buckets_path: "the_avg" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml deleted file mode 100644 index 323a5b9abbf1..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search.sort/10_nested_path_filter.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -setup: -- skip: - features: - - "headers" - - "allowed_warnings_regex" -- do: - indices.create: - index: "my-index" - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - properties: - offer: - type: "nested" -- do: - index: - index: "my-index" - id: "1" - refresh: true - body: - offer: - price: 10 - color: blue - - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - -- do: - indices.create: - index: "my-locations" - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - properties: - pin: - properties: - location: - type: geo_point - offer: - type: "nested" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - -- do: - index: - index: "my-locations" - id: "1" - refresh: true - body: - offer: - price: 10 - color: blue - pin: - location: - lat: 40.12 - lon: -71.34 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - - - - - ---- -"Sort with nested_path throws exception": -- do: - catch: /\[nested_path\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-index" - body: - sort: - - offer.price: - mode: avg - order: asc - nested_path: offer - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - ---- -"Sort with nested_filter throws exception": - - do: - catch: /\[nested_filter\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-index" - body: - sort: - - offer.price: - mode: avg - order: asc - nested_filter: - term: - offer.color: blue - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - - ---- -"Geo search with nested_filter throws exception": - - do: - catch: /\[nested_filter\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-locations" - body: - query: - match_all: {} - sort: - _geo_distance: - pin.location: - - -70 - - 40 - nested_filter: - term: - offer.color: blue - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - 
Accept: "application/vnd.elasticsearch+json;compatible-with=7" - ---- -"Geo search with nested_path throws exception": - - do: - catch: /\[nested_path\] has been removed in favour of the \[nested\] parameter/ - search: - rest_total_hits_as_int: true - index: "my-locations" - body: - query: - match_all: {} - sort: - _geo_distance: - pin.location: - - -70 - - 40 - nested_path: "offer" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_cutoff_frequency.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_cutoff_frequency.yml deleted file mode 100644 index b7df872ff0a8..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_cutoff_frequency.yml +++ /dev/null @@ -1,103 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "allowed_warnings_regex" - - do: - indices.create: - index: "test" - body: - mappings: - properties: - my_field1: - type: "text" - my_field2: - type: "text" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - do: - index: - index: "test" - id: "1" - body: - my_field1: "brown fox jump" - my_field2: "xylophone" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - do: - indices.refresh: {} - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - ---- -multi_match multiple fields with cutoff_frequency throws exception: -- do: - catch: "/cutoff_freqency is not supported. The \\[multi_match\\] query can skip block of documents efficiently if the total number of hits is not tracked/" - search: - rest_total_hits_as_int: true - index: "test" - body: - query: - multi_match: - query: "brown" - type: "bool_prefix" - fields: - - "my_field1" - - "my_field2" - cutoff_frequency: 0.001 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - ---- -match with cutoff_frequency throws exception: - - do: - catch: "/cutoff_freqency is not supported. The \\[match\\] query can skip block of documents efficiently if the total number of hits is not tracked/" - search: - rest_total_hits_as_int: true - index: "test" - body: - query: - match: - my_field1: - query: "brown" - type: "bool_prefix" - cutoff_frequency: 0.001 - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - ---- -common querythrows exception: - - do: - catch: "/Common Terms Query usage is not supported. 
Use \\[match\\] query which can efficiently skip blocks of documents if the total number of hits is not tracked./" - search: - rest_total_hits_as_int: true - index: "test" - body: - query: - common: - my_field1: - query: "brown" - type: "bool_prefix" - cutoff_frequency: 0.001 - low_freq_operator: or - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_geo_bounding_box.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_geo_bounding_box.yml deleted file mode 100644 index 3f3eac1e59e1..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_geo_bounding_box.yml +++ /dev/null @@ -1,78 +0,0 @@ ---- -setup: - - requires: - test_runner_features: - - "headers" - - "warnings" - - do: - indices.create: - index: locations - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 - mappings: - - properties: - location: - type: geo_point - - do: - bulk: - index: locations - refresh: true - body: | - {"index":{}} - {"location" : {"lat": 13.5, "lon" : 34.89}} - {"index":{}} - {"location" : {"lat": -7.9, "lon" : 120.78}} - {"index":{}} - {"location" : {"lat": 45.78, "lon" : -173.45}} - {"index":{}} - {"location" : {"lat": 32.45, "lon" : 45.6}} - {"index":{}} - {"location" : {"lat": -63.24, "lon" : 31.0}} - {"index":{}} - {"location" : {"lat": 0.0, "lon" : 0.0}} - - ---- -"geo bounding box query not compatible": - - do: - catch: /failed to parse \[geo_bounding_box\] query. unexpected field \[type\]/ - search: - index: locations - body: - query: - geo_bounding_box: - type : indexed - location: - top_left: - lat: 10 - lon: -10 - bottom_right: - lat: -10 - lon: 10 - ---- -"geo bounding box query compatible": - - do: - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings: - - "Deprecated parameter [type] used, it should no longer be specified." 
- search: - index: locations - body: - query: - geo_bounding_box: - type : indexed - location: - top_left: - lat: 10 - lon: -10 - bottom_right: - lat: -10 - lon: 10 - - match: {hits.total.value: 1} - diff --git a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_type_query.yml b/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_type_query.yml deleted file mode 100644 index fdaebbb2b81e..000000000000 --- a/rest-api-spec/src/yamlRestTestV7Compat/resources/rest-api-spec/test/search/10_type_query.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -setup: - - skip: - features: - - "headers" - - "allowed_warnings_regex" ---- -type query throws exception when used: - - do: - index: - index: "test1" - id: "1" - type: "cat" - refresh: true - body: - foo: "bar" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - - do: - catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ - search: - rest_total_hits_as_int: true - index: "test1" - body: - query: - type: - value: "cat" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - - - do: - catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ - search: - rest_total_hits_as_int: true - index: "test1" - body: - query: - type: - value: "_doc" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings_regex: - - "\\[types removal\\].*" - diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index 920677e8c4b4..27f8fc915cdd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -395,7 +395,7 @@ public void testFlush() { clearInterceptedActions(); String[] concreteIndexNames = TestIndexNameExpressionResolver.newInstance() - .concreteIndexNames(clusterAdmin().prepareState().get().getState(), flushRequest); + .concreteIndexNames(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), flushRequest); assertIndicesSubset(Arrays.asList(concreteIndexNames), indexShardActions); } @@ -422,7 +422,7 @@ public void testRefresh() { clearInterceptedActions(); String[] concreteIndexNames = TestIndexNameExpressionResolver.newInstance() - .concreteIndexNames(clusterAdmin().prepareState().get().getState(), refreshRequest); + .concreteIndexNames(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), refreshRequest); assertIndicesSubset(Arrays.asList(concreteIndexNames), indexShardActions); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 897f10b031dc..245e7cede4cf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -1053,7 +1053,7 @@ public void testCannotAllocateStaleReplicaExplanation() throws Exception { logger.info("--> close the index, now the replica is stale"); assertAcked(indicesAdmin().prepareClose("idx")); - final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth("idx") + final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx") .setTimeout(TimeValue.timeValueSeconds(30)) .setWaitForActiveShards(ActiveShardCount.ONE) .setWaitForNoInitializingShards(true) @@ -1254,7 +1254,7 @@ private void prepareIndex( if (state == IndexMetadata.State.CLOSE) { assertAcked(indicesAdmin().prepareClose("idx")); - final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth("idx") + final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx") .setTimeout(TimeValue.timeValueSeconds(30)) .setWaitForActiveShards(activeShardCount) .setWaitForEvents(Priority.LANGUID) @@ -1275,13 +1275,13 @@ private void indexData() { } private String primaryNodeName() { - ClusterState clusterState = admin().cluster().prepareState().get().getState(); + ClusterState clusterState = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String nodeId = clusterState.getRoutingTable().index("idx").shard(0).primaryShard().currentNodeId(); return clusterState.getRoutingNodes().node(nodeId).node().getName(); } private DiscoveryNode replicaNode() { - ClusterState clusterState = admin().cluster().prepareState().get().getState(); + ClusterState clusterState = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String nodeId = clusterState.getRoutingTable().index("idx").shard(0).replicaShards().get(0).currentNodeId(); return clusterState.getRoutingNodes().node(nodeId).node(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java index d0e0543bcca0..54b1b08806a9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceActionIT.java @@ -37,7 +37,9 @@ public void testDesiredBalanceOnMultiNodeCluster() throws Exception { indexData(index); - var clusterHealthResponse = clusterAdmin().health(new ClusterHealthRequest().waitForStatus(ClusterHealthStatus.GREEN)).get(); + var clusterHealthResponse = clusterAdmin().health( + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT).waitForStatus(ClusterHealthStatus.GREEN) + ).get(); assertEquals(RestStatus.OK, clusterHealthResponse.status()); final var desiredBalanceResponse = safeGet( @@ -50,7 +52,7 @@ public void testDesiredBalanceOnMultiNodeCluster() throws Exception { for (var entry : shardsMap.entrySet()) { Integer shardId = entry.getKey(); DesiredBalanceResponse.DesiredShards desiredShards = entry.getValue(); - IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState() + IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .routingTable() @@ -73,7 +75,9 @@ public void testDesiredBalanceWithUnassignedShards() throws Exception { int 
numberOfReplicas = 1; createIndex(index, numberOfShards, numberOfReplicas); indexData(index); - var clusterHealthResponse = clusterAdmin().health(new ClusterHealthRequest(index).waitForStatus(ClusterHealthStatus.YELLOW)).get(); + var clusterHealthResponse = clusterAdmin().health( + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, index).waitForStatus(ClusterHealthStatus.YELLOW) + ).get(); assertEquals(RestStatus.OK, clusterHealthResponse.status()); final var desiredBalanceResponse = safeGet( @@ -86,7 +90,7 @@ public void testDesiredBalanceWithUnassignedShards() throws Exception { for (var entry : shardsMap.entrySet()) { Integer shardId = entry.getKey(); DesiredBalanceResponse.DesiredShards desiredShards = entry.getValue(); - IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState() + IndexShardRoutingTable shardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .routingTable() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java index 63801f8c1e51..0fb8b450ffaf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDesiredNodesActionsIT.java @@ -87,6 +87,8 @@ public void testUpdateDesiredNodesIsIdempotent() { } final var equivalentUpdateRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion(), desiredNodesList, @@ -105,6 +107,8 @@ public void testGoingBackwardsWithinTheSameHistoryIsForbidden() { updateDesiredNodes(updateDesiredNodesRequest); final var backwardsUpdateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion() - 1, updateDesiredNodesRequest.getNodes(), @@ -123,6 +127,8 @@ public void testSameVersionWithDifferentContentIsForbidden() { updateDesiredNodes(updateDesiredNodesRequest); final var updateDesiredNodesRequestWithSameHistoryIdAndVersionAndDifferentSpecs = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion(), randomList(1, 10, DesiredNodesTestCase::randomDesiredNode), @@ -192,6 +198,8 @@ public void testNodeProcessorsGetValidatedWithDesiredNodeProcessors() { // This test verifies that the validation doesn't throw on desired nodes // with a higher number of available processors than the node running the tests. 
final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, UUIDs.randomBase64UUID(), randomIntBetween(1, 20), randomList( @@ -267,7 +275,7 @@ public void testDeleteDesiredNodesTasksAreBatchedCorrectly() throws Exception { future.actionGet(); } - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DesiredNodes latestDesiredNodes = DesiredNodes.latestFromClusterState(state); assertThat(latestDesiredNodes, is(nullValue())); } @@ -309,6 +317,8 @@ private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest() { private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest(Settings settings) { return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, UUIDs.randomBase64UUID(), randomIntBetween(2, 20), randomList(2, 10, () -> randomDesiredNode(settings)), @@ -318,6 +328,8 @@ private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest(Settings setti private UpdateDesiredNodesRequest randomDryRunUpdateDesiredNodesRequest(Settings settings) { return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, UUIDs.randomBase64UUID(), randomIntBetween(2, 20), randomList(2, 10, () -> randomDesiredNode(settings)), @@ -331,7 +343,7 @@ private void deleteDesiredNodes() { } private DesiredNodes getLatestDesiredNodes() { - final GetDesiredNodesAction.Request request = new GetDesiredNodesAction.Request(); + final GetDesiredNodesAction.Request request = new GetDesiredNodesAction.Request(TEST_REQUEST_TIMEOUT); final GetDesiredNodesAction.Response response = client().execute(GetDesiredNodesAction.INSTANCE, request).actionGet(); return response.getDesiredNodes(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 32d8be475dbb..180bef7ea409 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -141,7 +141,7 @@ public void testMasterNodeOperationTasks() throws Exception { registerTaskManagerListeners(TransportClusterHealthAction.NAME); // First run the health on the master node - should produce only one task on the master node - internalCluster().masterClient().admin().cluster().prepareHealth().get(); + internalCluster().masterClient().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertEquals(1, numberOfEvents(TransportClusterHealthAction.NAME, Tuple::v1)); // counting only registration events // counting only unregistration events // When checking unregistration events there might be some delay since receiving the response from the cluster doesn't @@ -151,7 +151,7 @@ public void testMasterNodeOperationTasks() throws Exception { resetTaskManagerListeners(TransportClusterHealthAction.NAME); // Now run the health on a non-master node - should produce one task on master and one task on another node - internalCluster().nonMasterClient().admin().cluster().prepareHealth().get(); + internalCluster().nonMasterClient().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertEquals(2, numberOfEvents(TransportClusterHealthAction.NAME, Tuple::v1)); // counting only registration events // counting only unregistration events 
assertBusy(() -> assertEquals(2, numberOfEvents(TransportClusterHealthAction.NAME, event -> event.v1() == false))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java index bb2c97ec9aa6..85dd1337204b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateActionDisruptionIT.java @@ -49,7 +49,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testNonLocalRequestAlwaysFindsMaster() throws Exception { runRepeatedlyWhileChangingMaster(() -> { - final ClusterStateRequestBuilder clusterStateRequestBuilder = clusterAdmin().prepareState() + final ClusterStateRequestBuilder clusterStateRequestBuilder = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setBlocks(true) @@ -69,7 +69,7 @@ public void testLocalRequestAlwaysSucceeds() throws Exception { final String node = randomFrom(internalCluster().getNodeNames()); final DiscoveryNodes discoveryNodes = client(node).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setLocal(true) .setNodes(true) @@ -98,7 +98,7 @@ public void testNonLocalRequestAlwaysFindsMasterAndWaitsForMetadata() throws Exc final long waitForMetadataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); final ClusterStateRequestBuilder clusterStateRequestBuilder = client(node).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setMetadata(true) @@ -131,7 +131,7 @@ public void testLocalRequestWaitsForMetadata() throws Exception { final long waitForMetadataVersion = randomLongBetween(Math.max(1, metadataVersion - 3), metadataVersion + 5); final ClusterStateResponse clusterStateResponse = client(node).admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setLocal(true) .setMetadata(true) @@ -156,7 +156,7 @@ public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception assertBusy( () -> assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setBlocks(true) @@ -188,7 +188,7 @@ public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception assertAcked( client(nonMasterNode).admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), value)) ); } @@ -225,17 +225,22 @@ public void runRepeatedlyWhileChangingMaster(Runnable runnable) throws Exception public void testFailsWithBlockExceptionIfBlockedAndBlocksNotRequested() { internalCluster().startMasterOnlyNode(Settings.builder().put(GatewayService.RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 1).build()); - final var state = safeGet(clusterAdmin().prepareState().clear().setBlocks(true).execute()).getState(); + final var state = safeGet(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setBlocks(true).execute()).getState(); assertTrue(state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat( - safeAwaitFailure(SubscribableListener.newForked(l ->
clusterAdmin().prepareState().clear().execute(l))), + safeAwaitFailure( + SubscribableListener.newForked( + l -> clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().execute(l) + ) + ), instanceOf(ClusterBlockException.class) ); internalCluster().startDataOnlyNode(); - final var recoveredState = safeGet(clusterAdmin().prepareState().clear().setBlocks(randomBoolean()).execute()).getState(); + final var recoveredState = safeGet(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setBlocks(randomBoolean()).execute()) + .getState(); assertFalse(recoveredState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 0c3dac0f99b6..2385c42526d4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -64,7 +64,8 @@ private void assertCounts(ClusterStatsNodes.Counts counts, int total, Map - Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); String mergeNode = discoveryNodes[0].getName(); @@ -158,7 +158,11 @@ public void testShrinkIndexPrimaryTerm() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); - final Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + final Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getDataNodes(); assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); final String mergeNode = discoveryNodes[0].getName(); @@ -222,7 +226,10 @@ public void testShrinkIndexPrimaryTerm() throws Exception { } private static IndexMetadata indexMetadata(final Client client, final String index) { - final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client.admin() + .cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)) + .actionGet(); return clusterStateResponse.getState().metadata().index(index); } @@ -236,7 +243,7 @@ public void testCreateShrinkIndex() { for (int i = 0; i < docs; i++) { prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node @@ -272,7 +279,7 @@ public void testCreateShrinkIndex()
assertNoResizeSourceIndexSettings("target"); // resolve true merge node - this is not always the node we required as all shards may be on another node - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); logger.info("merge node {}", mergeNode); @@ -342,7 +349,7 @@ public void testCreateShrinkIndexFails() throws Exception { for (int i = 0; i < 20; i++) { prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } - Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); String spareNode = discoveryNodes[0].getName(); @@ -369,7 +376,7 @@ public void testCreateShrinkIndexFails() throws Exception { .build() ) .get(); - clusterAdmin().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "target").setWaitForEvents(Priority.LANGUID).get(); // now we move all shards away from the merge node updateIndexSettings( @@ -382,7 +389,7 @@ public void testCreateShrinkIndexFails() throws Exception { updateIndexSettings(Settings.builder().putNull("index.routing.allocation.exclude._name"), "target"); // wait until it fails assertBusy(() -> { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); RoutingTable routingTables = clusterStateResponse.getState().routingTable(); assertTrue(routingTables.index("target").shard(0).shard(0).unassigned()); assertEquals( @@ -427,7 +434,7 @@ public void testCreateShrinkWithIndexSort() throws Exception { for (int i = 0; i < 20; i++) { prepareIndex("source").setId(Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); } - Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); String mergeNode = discoveryNodes[0].getName(); @@ -482,7 +489,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); } indicesAdmin().prepareFlush("source").get(); - Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState().get().getState().nodes().getDataNodes(); + Map<String, DiscoveryNode> dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getDataNodes(); DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode[]::new); // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due @@ -508,7 +515,7 @@ public void testShrinkCommitsMergeOnIdle() throws Exception { ensureGreen();
assertNoResizeSourceIndexSettings("target"); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); IndexMetadata target = clusterStateResponse.getState().getMetadata().index("target"); indicesAdmin().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get(); IndicesSegmentResponse targetSegStats = indicesAdmin().prepareSegments("target").get(); @@ -601,7 +608,7 @@ public void testShrinkThenSplitWithFailedNode() throws Exception { } static void assertNoResizeSourceIndexSettings(final String index) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState() + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .clear() .setMetadata(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 22549a1562dc..41646496c59c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -333,7 +333,10 @@ public void testSplitIndexPrimaryTerm() throws Exception { } private static IndexMetadata indexMetadata(final Client client, final String index) { - final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = client.admin() + .cluster() + .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)) + .actionGet(); return clusterStateResponse.getState().metadata().index(index); } @@ -371,7 +374,7 @@ public void testCreateSplitIndex() throws Exception { ensureGreen(); assertNoResizeSourceIndexSettings("target"); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); logger.info("split node {}", mergeNode); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 16f8f51cb8aa..becea454b7d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -83,7 +83,7 @@ public void testRolloverOnEmptyIndex() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-1"); if (explicitWriteIndex) { assertTrue(oldIndex.getAliases().containsKey("test_alias")); @@ -106,7 +106,7 @@ public void testRollover() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); 
assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); assertFalse(oldIndex.getAliases().containsKey("test_alias")); final IndexMetadata newIndex = state.metadata().index("test_index-000003"); @@ -139,7 +139,7 @@ public void testRolloverWithExplicitWriteIndex() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); assertTrue(oldIndex.getAliases().containsKey("test_alias")); assertFalse(oldIndex.getAliases().get("test_alias").writeIndex()); @@ -187,7 +187,7 @@ public void testRolloverWithIndexSettings() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); final IndexMetadata newIndex = state.metadata().index("test_index-000003"); assertThat(newIndex.getNumberOfShards(), equalTo(1)); @@ -220,7 +220,7 @@ public void testRolloverWithIndexSettingsWithoutPrefix() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-2"); final IndexMetadata newIndex = state.metadata().index("test_index-000003"); assertThat(newIndex.getNumberOfShards(), equalTo(1)); @@ -268,7 +268,7 @@ public void testRolloverDryRun() throws Exception { assertThat(response.isDryRun(), equalTo(true)); assertThat(response.isRolledOver(), equalTo(false)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-1"); assertTrue(oldIndex.getAliases().containsKey("test_alias")); final IndexMetadata newIndex = state.metadata().index("test_index-000002"); @@ -334,7 +334,7 @@ public void testRolloverConditionsNotMet() throws Exception { ) ); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index-0"); assertTrue(oldIndex.getAliases().containsKey("test_alias")); if (explicitWriteIndex) { @@ -361,7 +361,7 @@ public void testRolloverWithNewIndexName() throws Exception { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); 
assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index("test_index"); final IndexMetadata newIndex = state.metadata().index("test_new_index"); assertTrue(newIndex.getAliases().containsKey("test_alias")); @@ -452,7 +452,7 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("No rollover with a large max_size condition", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } @@ -466,7 +466,7 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("Should rollover with a small max_size condition", response.isRolledOver(), equalTo(true)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); List<Condition<?>> metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions(); assertThat(metConditions.size(), equalTo(1)); assertThat(metConditions.get(0).toString(), equalTo(new MaxSizeCondition(maxSizeValue).toString())); @@ -488,7 +488,11 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-000002")); assertThat(response.getNewIndex(), equalTo("test-000003")); assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-000002"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test-000002"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } } @@ -513,7 +517,7 @@ public void testRolloverMaxPrimaryShardSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("No rollover with a large max_primary_shard_size condition", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } @@ -527,7 +531,7 @@ public void testRolloverMaxPrimaryShardSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("Should rollover with a small max_primary_shard_size condition", response.isRolledOver(), equalTo(true)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex =
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); List> metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions(); assertThat(metConditions.size(), equalTo(1)); assertThat(metConditions.get(0).toString(), equalTo(new MaxPrimaryShardSizeCondition(maxPrimaryShardSizeCondition).toString())); @@ -549,7 +553,11 @@ public void testRolloverMaxPrimaryShardSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-000002")); assertThat(response.getNewIndex(), equalTo("test-000003")); assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-000002"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test-000002"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } } @@ -573,7 +581,7 @@ public void testRolloverMaxPrimaryShardDocs() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("No rollover with a large max_primary_shard_docs condition", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } @@ -587,7 +595,7 @@ public void testRolloverMaxPrimaryShardDocs() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("Should rollover with a small max_primary_shard_docs condition", response.isRolledOver(), equalTo(true)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-1"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test-1"); List> metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions(); assertThat(metConditions.size(), equalTo(1)); assertThat( @@ -610,7 +618,11 @@ public void testRolloverMaxPrimaryShardDocs() throws Exception { assertThat(response.getOldIndex(), equalTo("test-000002")); assertThat(response.getNewIndex(), equalTo("test-000003")); assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); - final IndexMetadata oldIndex = clusterAdmin().prepareState().get().getState().metadata().index("test-000002"); + final IndexMetadata oldIndex = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test-000002"); assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } } @@ -698,7 +710,7 @@ public void testRolloverWithHiddenAliasesAndExplicitWriteIndex() { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index(firstIndexName); assertTrue(oldIndex.getAliases().containsKey(aliasName)); assertTrue(oldIndex.getAliases().get(aliasName).isHidden()); @@ -732,7 +744,7 @@ public void 
testRolloverWithHiddenAliasesAndImplicitWriteIndex() { assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata oldIndex = state.metadata().index(firstIndexName); assertFalse(oldIndex.getAliases().containsKey(aliasName)); final IndexMetadata newIndex = state.metadata().index(secondIndexName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 1a070c8bd0de..e6b042c059f4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -88,10 +88,10 @@ public void testBasic() throws Exception { logger.info("--> disable allocation"); disableAllocation(index); logger.info("--> stop random node"); - int num = clusterAdmin().prepareState().get().getState().nodes().getSize(); + int num = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getSize(); internalCluster().stopNode(internalCluster().getNodeNameThat(new IndexNodePredicate(index))); - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes("" + (num - 1))); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("" + (num - 1))); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List unassignedShards = clusterState.routingTable().index(index).shardsWithState(ShardRoutingState.UNASSIGNED); response = execute(new IndicesShardStoresRequest(index)); assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); @@ -227,7 +227,7 @@ public boolean test(Settings settings) { } private Set findNodesWithShard(String index) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexRoutingTable indexRoutingTable = state.routingTable().index(index); List startedShards = indexRoutingTable.shardsWithState(ShardRoutingState.STARTED); Set nodesNamesWithShard = new HashSet<>(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index 38d571928729..300ef1691a07 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -14,15 +14,12 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.ingest.IngestTestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -31,7 +28,6 @@ import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -39,7 +35,6 @@ import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -134,17 +129,11 @@ public void testBulkWithGlobalDefaults() throws Exception { } } - private void createSamplePipeline(String pipelineId) throws IOException, ExecutionException, InterruptedException { - XContentBuilder pipeline = jsonBuilder().startObject() - .startArray("processors") - .startObject() - .startObject("test") - .endObject() - .endObject() - .endArray() - .endObject(); - - assertAcked(clusterAdmin().putPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(pipeline), XContentType.JSON))); + private void createSamplePipeline(String pipelineId) throws IOException { + putJsonPipeline( + pipelineId, + (builder, params) -> builder.startArray("processors").startObject().startObject("test").endObject().endObject().endArray() + ); } /** This test ensures that index deletion makes indexing fail quickly, not wait on the index that has disappeared */ diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java index 85b720a03478..d8797f3c6457 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsIT.java @@ -28,7 +28,7 @@ public void testBulkProcessorAutoCreateRestrictions() { internalCluster().startNode(settings); createIndex("willwork"); - clusterAdmin().prepareHealth("willwork").setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "willwork").setWaitForGreenStatus().get(); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); bulkRequestBuilder.add(prepareIndex("willwork").setId("1").setSource("{\"foo\":1}", XContentType.JSON)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java index 4a56a6ce8ddb..573d929ee30a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/TransportSimulateBulkActionIT.java @@ -80,7 +80,7 @@ public void testMappingValidationIndexExists() { SearchResponse searchResponse = client().search(new SearchRequest(indexName)).actionGet(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); 
searchResponse.decRef(); - ClusterStateResponse clusterStateResponse = admin().cluster().state(new ClusterStateRequest()).actionGet(); + ClusterStateResponse clusterStateResponse = admin().cluster().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); Map indexMapping = clusterStateResponse.getState().metadata().index(indexName).mapping().sourceAsMap(); Map fields = (Map) indexMapping.get("properties"); assertThat(fields.size(), equalTo(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java index 39273e9d1712..023fa54fef9e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java @@ -134,7 +134,7 @@ public void testCreateIndexStopsWaitingWhenIndexDeleted() throws Exception { .execute(); logger.info("--> wait until the cluster state contains the new index"); - assertBusy(() -> assertTrue(clusterAdmin().prepareState().get().getState().metadata().hasIndex(indexName))); + assertBusy(() -> assertTrue(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().hasIndex(indexName))); logger.info("--> delete the index"); assertAcked(indicesAdmin().prepareDelete(indexName)); @@ -148,7 +148,7 @@ public void testCreateIndexStopsWaitingWhenIndexDeleted() throws Exception { // only after the test cleanup does the index creation manifest in the cluster state. To take care of this problem // and its potential ramifications, we wait here for the index creation cluster state update task to finish private void waitForIndexCreationToComplete(final String indexName) { - clusterAdmin().prepareHealth(indexName).setWaitForEvents(Priority.URGENT).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).setWaitForEvents(Priority.URGENT).get(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java index 6737d02434c0..bb970f69ead1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java @@ -53,7 +53,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { allowNodes("test", 2); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(2) .setWaitForYellowStatus() @@ -90,7 +90,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { } allowNodes("test", 3); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForActiveShards(3) .setWaitForGreenStatus() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java index e568b51e43b2..321c1c84d5cb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/action/support/master/TransportMasterNodeActionIT.java @@ -79,7 +79,13 @@ public void testRoutingLoopProtection() { try { final var newMaster = ensureSufficientMasterEligibleNodes(); - final long originalTerm = internalCluster().masterClient().admin().cluster().prepareState().get().getState().term(); + final long originalTerm = internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .term(); final var previousMasterKnowsNewMasterIsElectedLatch = configureElectionLatch(newMaster, cleanupTasks); final var newMasterReceivedReroutedMessageFuture = new PlainActionFuture<>(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index 2f10711db737..91903fd70003 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -202,7 +202,7 @@ public void testFilteringAliases() throws Exception { // For now just making sure that filter was stored with the alias logger.info("--> making sure that filter was stored with alias [alias1] and filter [user:kimchy]"); - ClusterState clusterState = admin().cluster().prepareState().get().getState(); + ClusterState clusterState = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); IndexMetadata indexMd = clusterState.metadata().index("test"); assertThat(indexMd.getAliases().get("alias1").filter().string(), equalTo(""" {"term":{"user":{"value":"kimchy"}}}""")); @@ -1416,21 +1416,33 @@ private void assertAliasesVersionIncreases(final String index, final Runnable ru private void assertAliasesVersionIncreases(final String[] indices, final Runnable runnable) { final var beforeAliasesVersions = new HashMap(indices.length); - final var beforeMetadata = admin().cluster().prepareState().get().getState().metadata(); + final var beforeMetadata = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); for (final var index : indices) { beforeAliasesVersions.put(index, beforeMetadata.index(index).getAliasesVersion()); } runnable.run(); - final var afterMetadata = admin().cluster().prepareState().get().getState().metadata(); + final var afterMetadata = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); for (final String index : indices) { assertThat(afterMetadata.index(index).getAliasesVersion(), equalTo(1 + beforeAliasesVersions.get(index))); } } private void assertAliasesVersionUnchanged(final String index, final Runnable runnable) { - final long beforeAliasesVersion = admin().cluster().prepareState().get().getState().metadata().index(index).getAliasesVersion(); + final long beforeAliasesVersion = admin().cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index(index) + .getAliasesVersion(); runnable.run(); - final long afterAliasesVersion = admin().cluster().prepareState().get().getState().metadata().index(index).getAliasesVersion(); + final long afterAliasesVersion = admin().cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index(index) + .getAliasesVersion(); assertThat(afterAliasesVersion, equalTo(beforeAliasesVersion)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java index c5c3e441363d..517357782715 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java @@ -298,7 +298,7 @@ public void testAddBlockToUnassignedIndex() throws Exception { .setSettings(Settings.builder().put("index.routing.allocation.include._name", "nothing").build()) ); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().indices().get(indexName).getState(), is(IndexMetadata.State.OPEN)); assertThat(clusterState.routingTable().allShards().allMatch(ShardRouting::unassigned), is(true)); @@ -393,7 +393,7 @@ public void testAddBlockWhileDeletingIndices() throws Exception { } indices[i] = indexName; } - assertThat(clusterAdmin().prepareState().get().getState().metadata().indices().size(), equalTo(indices.length)); + assertThat(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indices().size(), equalTo(indices.length)); final List threads = new ArrayList<>(); final CountDownLatch latch = new CountDownLatch(1); @@ -434,7 +434,7 @@ public void testAddBlockWhileDeletingIndices() throws Exception { } static void assertIndexHasBlock(APIBlock block, final String... indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { final IndexMetadata indexMetadata = clusterState.metadata().indices().get(index); final Settings indexSettings = indexMetadata.getSettings(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java index 65c0aa654818..a190ac61bbe1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -43,7 +43,7 @@ public void testSimpleLocalHealth() { logger.info("--> getting cluster health on [{}]", node); final ClusterHealthResponse health = client(node).admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setLocal(true) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(30)) @@ -56,7 +56,7 @@ public void testSimpleLocalHealth() { public void testHealth() { logger.info("--> running cluster health on an index that does not exists"); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test1") + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test1") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(1)) .get(); @@ -65,7 +65,10 @@ public void testHealth() { assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); logger.info("--> running cluster wide health"); - healthResponse = clusterAdmin().prepareHealth().setWaitForGreenStatus().setTimeout(TimeValue.timeValueSeconds(10)).get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(10)) + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), 
equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); @@ -74,13 +77,16 @@ public void testHealth() { createIndex("test1"); logger.info("--> running cluster health on an index that does exists"); - healthResponse = clusterAdmin().prepareHealth("test1").setWaitForGreenStatus().setTimeout(TimeValue.timeValueSeconds(10)).get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test1") + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(10)) + .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); logger.info("--> running cluster health on an index that does exists and an index that doesn't exists"); - healthResponse = clusterAdmin().prepareHealth("test1", "test2") + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test1", "test2") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(1)) .get(); @@ -93,7 +99,7 @@ public void testHealth() { public void testHealthWithClosedIndices() { createIndex("index-1"); { - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -103,7 +109,7 @@ public void testHealthWithClosedIndices() { assertAcked(indicesAdmin().prepareClose("index-2")); { - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(2)); @@ -111,21 +117,21 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-1").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-1").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-2").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-2").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), 
equalTo(false)); assertThat(response.getIndices().size(), equalTo(2)); @@ -133,7 +139,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -143,7 +149,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-2"), nullValue()); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true)) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -157,7 +163,7 @@ public void testHealthWithClosedIndices() { assertAcked(indicesAdmin().prepareClose("index-3")); { - ClusterHealthResponse response = clusterAdmin().prepareHealth() + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNoRelocatingShards(true) .setWaitForNoInitializingShards(true) .setWaitForYellowStatus() @@ -170,28 +176,28 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-1").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-1").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-2").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-2").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-3").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-3").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(1)); assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*").get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*").get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(3)); @@ -200,7 +206,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW)); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse 
response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); @@ -211,7 +217,7 @@ public void testHealthWithClosedIndices() { assertThat(response.getIndices().get("index-3"), nullValue()); } { - ClusterHealthResponse response = clusterAdmin().prepareHealth("index-*") + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index-*") .setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true)) .get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); @@ -224,7 +230,7 @@ public void testHealthWithClosedIndices() { setReplicaCount(numberOfReplicas(), "index-3"); { - ClusterHealthResponse response = clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse response = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(response.isTimedOut(), equalTo(false)); assertThat(response.getIndices().size(), equalTo(3)); @@ -240,7 +246,7 @@ public void testHealthOnIndexCreation() throws Exception { @Override public void run() { while (finished.get() == false) { - ClusterHealthResponse health = clusterAdmin().prepareHealth().get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get(); assertThat(health.getStatus(), not(equalTo(ClusterHealthStatus.RED))); } } @@ -254,7 +260,7 @@ public void run() { } public void testWaitForEventsRetriesIfOtherConditionsNotMet() { - final ActionFuture healthResponseFuture = clusterAdmin().prepareHealth("index") + final ActionFuture healthResponseFuture = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .execute(); @@ -286,7 +292,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) try { createIndex("index"); - assertFalse(clusterAdmin().prepareHealth("index").setWaitForGreenStatus().get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index").setWaitForGreenStatus().get().isTimedOut()); // at this point the original health response should not have returned: there was never a point where the index was green AND // the master had processed all pending tasks above LANGUID priority. 
@@ -326,7 +332,7 @@ public void testHealthOnMasterFailover() throws Exception { responseFutures.add( client(node).admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setMasterNodeTimeout(TimeValue.timeValueMinutes(timeoutMinutes)) @@ -369,7 +375,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) }); try { - final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + final ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(1)) .get(TimeValue.timeValueSeconds(30)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index cc930cdad595..f0801d01a70d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -206,7 +206,7 @@ public void testClusterInfoServiceInformationClearOnError() { prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get(); ensureGreen("test"); - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -320,7 +320,12 @@ public void testClusterInfoServiceInformationClearOnError() { assertThat("size for shard " + shardRouting + " found", originalInfo.getShardSize(shardRouting), notNullValue()); } - RoutingTable routingTable = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState().routingTable(); + RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setRoutingTable(true) + .get() + .getState() + .routingTable(); for (ShardRouting shard : routingTable.allShardsIterator()) { assertTrue( infoAfterRecovery.getReservedSpace(shard.currentNodeId(), infoAfterRecovery.getDataPath(shard)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index ffa2cd29778a..8fa63c15c7aa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -152,7 +152,6 @@ public void testClusterStateDiffSerialization() throws Exception { } // Check routing table - assertThat(clusterStateFromDiffs.routingTable().version(), equalTo(clusterState.routingTable().version())); assertThat(clusterStateFromDiffs.routingTable().indicesRouting(), equalTo(clusterState.routingTable().indicesRouting())); // Check cluster blocks diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java index aaf663c8c5b2..382d7aa8eb64 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java @@ -55,11 +55,15 @@ private UpdateDesiredNodesResponse 
updateDesiredNodes(UpdateDesiredNodesRequest } private DesiredNodes getLatestDesiredNodes() { - return client().execute(GetDesiredNodesAction.INSTANCE, new GetDesiredNodesAction.Request()).actionGet().getDesiredNodes(); + return client().execute(GetDesiredNodesAction.INSTANCE, new GetDesiredNodesAction.Request(TEST_REQUEST_TIMEOUT)) + .actionGet() + .getDesiredNodes(); } private UpdateDesiredNodesRequest randomUpdateDesiredNodesRequest() { return new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), randomIntBetween(1, 10), randomList( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java index 77fcdb446baa..08dbd800ede5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesStatusIT.java @@ -35,6 +35,8 @@ public void testDesiredNodesStatusIsTracked() { final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -43,11 +45,13 @@ public void testDesiredNodesStatusIsTracked() { updateDesiredNodes(updateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } final var newVersionUpdateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, updateDesiredNodesRequest.getHistoryID(), updateDesiredNodesRequest.getVersion() + 1, updateDesiredNodesRequest.getNodes(), @@ -56,7 +60,7 @@ public void testDesiredNodesStatusIsTracked() { updateDesiredNodes(newVersionUpdateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } } @@ -70,6 +74,8 @@ public void testIdempotentUpdateWithUpdatedStatus() { final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -78,14 +84,14 @@ public void testIdempotentUpdateWithUpdatedStatus() { updateDesiredNodes(updateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } updateDesiredNodes(updateDesiredNodesRequest); { - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); } } @@ -99,6 +105,8 @@ public void 
testActualizedDesiredNodesAreKeptAsActualizedEvenIfNodesLeavesTempor final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -106,7 +114,7 @@ public void testActualizedDesiredNodesAreKeptAsActualizedEvenIfNodesLeavesTempor ); updateDesiredNodes(updateDesiredNodesRequest); - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); final var leavingNodeNames = randomSubsetOf(nodeNames); @@ -114,7 +122,7 @@ public void testActualizedDesiredNodesAreKeptAsActualizedEvenIfNodesLeavesTempor internalCluster().stopNode(leavingNodeName); } - final var newClusterState = clusterAdmin().prepareState().get().getState(); + final var newClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final var latestDesiredNodes = DesiredNodes.latestFromClusterState(newClusterState); for (String leavingNodeName : leavingNodeNames) { @@ -132,6 +140,8 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti final var pendingDesiredNodes = randomList(0, 5, DesiredNodesTestCase::randomDesiredNode); final var updateDesiredNodesRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, concatLists(actualizedDesiredNodes, pendingDesiredNodes), @@ -139,7 +149,7 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti ); updateDesiredNodes(updateDesiredNodesRequest); - final var clusterState = clusterAdmin().prepareState().get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); DesiredNodesTestCase.assertDesiredNodesStatusIsCorrect(clusterState, actualizedDesiredNodes, pendingDesiredNodes); // Stop some nodes, these shouldn't be actualized within the new desired node's history until they join back @@ -149,6 +159,8 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti } final var updateDesiredNodesWithNewHistoryRequest = new UpdateDesiredNodesRequest( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, randomAlphaOfLength(10), 1, updateDesiredNodesRequest.getNodes(), @@ -157,7 +169,7 @@ public void testStatusInformationIsClearedAfterHistoryIdChanges() throws Excepti final var response = updateDesiredNodes(updateDesiredNodesWithNewHistoryRequest); assertThat(response.hasReplacedExistingHistoryId(), is(equalTo(true))); - final var updatedClusterState = clusterAdmin().prepareState().get().getState(); + final var updatedClusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final var latestDesiredNodes = DesiredNodes.latestFromClusterState(updatedClusterState); for (String clusterNodeName : clusterNodeNames) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index d3cbab276074..a3c7f8b77a44 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ 
-67,25 +67,25 @@ public void testTwoNodesNoMasterBlock() throws Exception { String node1Name = internalCluster().startNode(settings); logger.info("--> should be blocked, no master..."); - ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); assertThat(state.nodes().getSize(), equalTo(1)); // verify that we still see the local node in the cluster state logger.info("--> start second node, cluster should be formed"); String node2Name = internalCluster().startNode(settings); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(false)); @@ -97,7 +97,10 @@ public void testTwoNodesNoMasterBlock() throws Exception { } // make sure that all shards recovered before trying to flush assertThat( - clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().getActiveShards(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test") + .setWaitForActiveShards(numShards.totalNumShards) + .get() + .getActiveShards(), equalTo(numShards.totalNumShards) ); // flush for simpler debugging @@ -111,17 +114,20 @@ public void testTwoNodesNoMasterBlock() throws Exception { String masterNode = internalCluster().getMasterName(); String otherNode = node1Name.equals(masterNode) ? 
node2Name : node1Name; logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected"); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(otherNode)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, otherNode) + ).get(); logger.info("--> stop master node, no master block should appear"); Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode); internalCluster().stopNode(masterNode); assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertTrue(clusterState.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); // verify that both nodes are still in the cluster state but there is no master assertThat(state.nodes().getSize(), equalTo(2)); @@ -130,19 +136,19 @@ public void testTwoNodesNoMasterBlock() throws Exception { logger.info("--> starting the previous master node again..."); node2Name = internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build()); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() .setWaitForNodes("2") .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(true)); @@ -154,20 +160,23 @@ public void testTwoNodesNoMasterBlock() throws Exception { } logger.info("--> clearing voting config exclusions"); - ClearVotingConfigExclusionsRequest clearRequest = new ClearVotingConfigExclusionsRequest(); + ClearVotingConfigExclusionsRequest clearRequest = new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT); clearRequest.setWaitForRemoval(false); client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearRequest).get(); masterNode = internalCluster().getMasterName(); otherNode = node1Name.equals(masterNode) ? 
node2Name : node1Name; logger.info("--> add voting config exclusion for master node, to be sure it's not elected"); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(masterNode)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, masterNode) + ).get(); logger.info("--> stop non-master node, no master block should appear"); Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode); internalCluster().stopNode(otherNode); assertBusy(() -> { - ClusterState state1 = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState state1 = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); @@ -175,19 +184,19 @@ public void testTwoNodesNoMasterBlock() throws Exception { internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build()); ensureGreen(); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .setWaitForGreenStatus() .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().setLocal(true).get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.metadata().indices().containsKey("test"), equalTo(true)); @@ -212,7 +221,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { assertBusy(() -> { for (Client client : clients()) { - ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state1 = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); } }); @@ -221,13 +230,13 @@ public void testThreeNodesNoMasterBlock() throws Exception { internalCluster().startNode(settings); ensureGreen(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("3") .get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(3)); createIndex("test"); @@ -239,7 +248,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { ensureGreen(); // make sure that all shards recovered before trying to flush assertThat( - 
clusterAdmin().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForActiveShards(numShards.totalNumShards).get().isTimedOut(), equalTo(false) ); // flush for simpler debugging @@ -262,7 +271,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { logger.info("--> verify that there is no master anymore on remaining node"); // spin here to wait till the state is set assertBusy(() -> { - ClusterState st = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState st = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat(st.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); @@ -272,7 +281,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { internalCluster().validateClusterFormed(); ensureGreen(); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.nodes().getSize(), equalTo(3)); logger.info("--> verify we the data back"); @@ -338,7 +347,7 @@ public void onFailure(Exception e) { DiscoveryNode masterNode = internalCluster().client(randomFrom(otherNodes)) .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .nodes() @@ -350,7 +359,7 @@ public void onFailure(Exception e) { partition.stopDisrupting(); logger.debug("--> waiting for cluster to heal"); - assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForEvents(Priority.LANGUID)); + assertNoTimeout(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").setWaitForEvents(Priority.LANGUID)); for (String node : internalCluster().getNodeNames()) { Settings nodeSetting = internalCluster().clusterService(node).state().metadata().settings(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index d8c91d770437..6b104291693e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -72,7 +72,7 @@ public void testNoMasterActions() throws Exception { final List nodes = internalCluster().startNodes(3, settings); createIndex("test"); - clusterAdmin().prepareHealth("test").setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForGreenStatus().get(); final NetworkDisruption disruptionScheme = new NetworkDisruption( new IsolateAllNodes(new HashSet<>(nodes)), @@ -84,7 +84,12 @@ public void testNoMasterActions() throws Exception { final Client clientToMasterlessNode = client(); assertBusy(() -> { - ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = clientToMasterlessNode.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -223,14 +228,14 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { prepareCreate("test1").setSettings(indexSettings(1, 2)).get(); prepareCreate("test2").setSettings(indexSettings(3, 0)).get(); - clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get(); + 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "_all").setWaitForGreenStatus().get(); prepareIndex("test1").setId("1").setSource("field", "value1").get(); prepareIndex("test2").setId("1").setSource("field", "value1").get(); refresh(); ensureSearchable("test1", "test2"); - ClusterStateResponse clusterState = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); logger.info("Cluster state:\n{}", clusterState.getState()); final NetworkDisruption disruptionScheme = new NetworkDisruption( @@ -243,7 +248,12 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { final Client clientToMasterlessNode = client(); assertBusy(() -> { - ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = clientToMasterlessNode.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -299,13 +309,13 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { final List nodes = internalCluster().startNodes(3, settings); prepareCreate("test1").setSettings(indexSettings(1, 1)).get(); - clusterAdmin().prepareHealth("_all").setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "_all").setWaitForGreenStatus().get(); prepareIndex("test1").setId("1").setSource("field", "value1").get(); refresh(); ensureGreen("test1"); - ClusterStateResponse clusterState = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); logger.info("Cluster state:\n{}", clusterState.getState()); final List nodesWithShards = clusterState.getState() @@ -321,7 +331,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { client().execute( TransportAddVotingConfigExclusionsAction.TYPE, - new AddVotingConfigExclusionsRequest(nodesWithShards.toArray(new String[0])) + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, nodesWithShards.toArray(new String[0])) ).get(); ensureGreen("test1"); @@ -336,7 +346,7 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { assertBusy(() -> { for (String node : nodesWithShards) { - ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = client(node).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); } }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java index 3135647adc9a..6b5bb08f0f24 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java @@ -62,7 +62,8 @@ public void testNodeRemovalFromNonRedCluster() throws Exception { case 2 -> req.setExternalIds(internalCluster().clusterService(nodeName).localNode().getExternalId()); default -> throw new IllegalStateException("Unexpected value"); } - PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req.build()).get(); + PrevalidateNodeRemovalResponse resp = 
client().execute(PrevalidateNodeRemovalAction.INSTANCE, req.build(TEST_REQUEST_TIMEOUT)) + .get(); assertTrue(resp.getPrevalidation().isSafe()); assertThat(resp.getPrevalidation().message(), equalTo("cluster status is not RED")); assertThat(resp.getPrevalidation().nodes().size(), equalTo(1)); @@ -75,7 +76,7 @@ public void testNodeRemovalFromNonRedCluster() throws Exception { // Enforce a replica to get unassigned updateIndexSettings(Settings.builder().put("index.routing.allocation.require._name", node1), indexName); ensureYellow(); - PrevalidateNodeRemovalRequest req2 = PrevalidateNodeRemovalRequest.builder().setNames(node2).build(); + PrevalidateNodeRemovalRequest req2 = PrevalidateNodeRemovalRequest.builder().setNames(node2).build(TEST_REQUEST_TIMEOUT); PrevalidateNodeRemovalResponse resp2 = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req2).get(); assertTrue(resp2.getPrevalidation().isSafe()); assertThat(resp2.getPrevalidation().message(), equalTo("cluster status is not RED")); @@ -107,7 +108,7 @@ public void testNodeRemovalFromRedClusterWithNoLocalShardCopy() throws Exception internalCluster().stopNode(nodeWithIndex); ensureRed(indexName); String[] otherNodeNames = otherNodes.toArray(new String[otherNodes.size()]); - PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(otherNodeNames).build(); + PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(otherNodeNames).build(TEST_REQUEST_TIMEOUT); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); assertTrue(resp.getPrevalidation().isSafe()); assertThat(resp.getPrevalidation().message(), equalTo("")); @@ -154,7 +155,7 @@ public void testNodeRemovalFromRedClusterWithLocalShardCopy() throws Exception { ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, new ShardId(index, 0), ""); assertNotNull("local index shards not found", shardPath); // Prevalidate removal of node1 - PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(node1).build(); + PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(node1).build(TEST_REQUEST_TIMEOUT); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); String node1Id = getNodeId(node1); assertFalse(resp.getPrevalidation().isSafe()); @@ -183,7 +184,7 @@ public void testNodeRemovalFromRedClusterWithTimeout() throws Exception { }); PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder() .setNames(node2) - .build() + .build(TEST_REQUEST_TIMEOUT) .masterNodeTimeout(TimeValue.timeValueSeconds(1)) .timeout(TimeValue.timeValueSeconds(1)); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); @@ -203,7 +204,7 @@ public void testNodeRemovalFromRedClusterWithTimeout() throws Exception { private void ensureRed(String indexName) throws Exception { assertBusy(() -> { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName) + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName) .setWaitForStatus(ClusterHealthStatus.RED) .setWaitForEvents(Priority.LANGUID) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 3dba41adec08..1259650d3779 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -76,13 +76,16 @@ public void indexData() throws Exception { } public void testRoutingTable() throws Exception { - ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState().clear().setRoutingTable(true).get(); + ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setRoutingTable(true) + .get(); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("foo"), is(true)); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("fuu"), is(true)); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("baz"), is(true)); assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("non-existent"), is(false)); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().get(); assertThat(clusterStateResponse.getState().routingTable().hasIndex("foo"), is(false)); assertThat(clusterStateResponse.getState().routingTable().hasIndex("fuu"), is(false)); assertThat(clusterStateResponse.getState().routingTable().hasIndex("baz"), is(false)); @@ -90,43 +93,49 @@ public void testRoutingTable() throws Exception { } public void testNodes() throws Exception { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().setNodes(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setNodes(true).get(); assertThat(clusterStateResponse.getState().nodes().getNodes().size(), is(cluster().size())); - ClusterStateResponse clusterStateResponseFiltered = clusterAdmin().prepareState().clear().get(); + ClusterStateResponse clusterStateResponseFiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().get(); assertThat(clusterStateResponseFiltered.getState().nodes().getNodes().size(), is(0)); } public void testMetadata() throws Exception { - ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState().clear().setMetadata(true).get(); + ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .get(); assertThat(clusterStateResponseUnfiltered.getState().metadata().indices().size(), is(3)); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().get(); assertThat(clusterStateResponse.getState().metadata().indices().size(), is(0)); } public void testMetadataVersion() { createIndex("index-1"); createIndex("index-2"); - long baselineVersion = clusterAdmin().prepareState().get().getState().metadata().version(); + long baselineVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().version(); assertThat(baselineVersion, greaterThan(0L)); assertThat( - clusterAdmin().prepareState().setIndices("index-1").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("index-1").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); assertThat( - 
clusterAdmin().prepareState().setIndices("index-2").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("index-2").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); assertThat( - clusterAdmin().prepareState().setIndices("*").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("*").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); assertThat( - clusterAdmin().prepareState().setIndices("not-found").get().getState().metadata().version(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("not-found").get().getState().metadata().version(), greaterThanOrEqualTo(baselineVersion) ); - assertThat(clusterAdmin().prepareState().clear().setMetadata(false).get().getState().metadata().version(), equalTo(0L)); + assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(false).get().getState().metadata().version(), + equalTo(0L) + ); } public void testIndexTemplates() throws Exception { @@ -170,7 +179,7 @@ public void testIndexTemplates() throws Exception { ) .get(); - ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponseUnfiltered = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(clusterStateResponseUnfiltered.getState().metadata().templates().size(), is(greaterThanOrEqualTo(2))); GetIndexTemplatesResponse getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates("foo_template").get(); @@ -198,7 +207,7 @@ public void testThatFilteringByIndexWorksForMetadataAndRoutingTable() throws Exc * that the cluster state returns coherent data for both routing table and metadata. 
*/ private void testFilteringByIndexWorks(String[] indices, String[] expected) { - ClusterStateResponse clusterState = clusterAdmin().prepareState() + ClusterStateResponse clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setRoutingTable(true) @@ -262,19 +271,23 @@ public void testLargeClusterStatePublishing() throws Exception { } public void testIndicesOptions() throws Exception { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().setMetadata(true).setIndices("f*").get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices("f*") + .get(); assertThat(clusterStateResponse.getState().metadata().indices().size(), is(2)); ensureGreen("fuu"); // close one index assertAcked(indicesAdmin().close(new CloseIndexRequest("fuu")).get()); - clusterStateResponse = clusterAdmin().prepareState().clear().setMetadata(true).setIndices("f*").get(); + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).setIndices("f*").get(); assertThat(clusterStateResponse.getState().metadata().indices().size(), is(1)); assertThat(clusterStateResponse.getState().metadata().index("foo").getState(), equalTo(IndexMetadata.State.OPEN)); // expand_wildcards_closed should toggle return only closed index fuu IndicesOptions expandCloseOptions = IndicesOptions.fromOptions(false, true, false, true); - clusterStateResponse = clusterAdmin().prepareState() + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("f*") @@ -285,7 +298,7 @@ public void testIndicesOptions() throws Exception { // ignore_unavailable set to true should not raise exception on fzzbzz IndicesOptions ignoreUnavailabe = IndicesOptions.fromOptions(true, true, true, false); - clusterStateResponse = clusterAdmin().prepareState() + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("fzzbzz") @@ -296,7 +309,7 @@ public void testIndicesOptions() throws Exception { // empty wildcard expansion result should work when allowNoIndices is // turned on IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false); - clusterStateResponse = clusterAdmin().prepareState() + clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setIndices("a*") @@ -309,7 +322,12 @@ public void testIndicesOptionsOnAllowNoIndicesFalse() throws Exception { // empty wildcard expansion throws exception when allowNoIndices is turned off IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, false, true, false); try { - clusterAdmin().prepareState().clear().setMetadata(true).setIndices("a*").setIndicesOptions(allowNoIndices).get(); + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices("a*") + .setIndicesOptions(allowNoIndices) + .get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index [a*]")); @@ -320,7 +338,12 @@ public void testIndicesIgnoreUnavailableFalse() throws Exception { // ignore_unavailable set to false throws exception when allowNoIndices is turned off IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false); try { - clusterAdmin().prepareState().clear().setMetadata(true).setIndices("fzzbzz").setIndicesOptions(allowNoIndices).get(); + 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .setIndices("fzzbzz") + .setIndicesOptions(allowNoIndices) + .get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { assertThat(e.getMessage(), is("no such index [fzzbzz]")); @@ -330,7 +353,7 @@ public void testIndicesIgnoreUnavailableFalse() throws Exception { public void testPrivateCustomsAreExcluded() throws Exception { // ensure that the custom is injected into the cluster state assertBusy(() -> assertTrue(clusterService().state().customs().containsKey("test"))); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setCustoms(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setCustoms(true).get(); assertFalse(clusterStateResponse.getState().customs().containsKey("test")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java index 8a239f7293e2..58daca22303c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleDataNodesIT.java @@ -47,7 +47,12 @@ public void testIndexingBeforeAndAfterDataNodesStart() { internalCluster().startNode(nonDataNode()); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("2") + .setLocal(true) + .get() + .isTimedOut(), equalTo(false) ); @@ -62,7 +67,12 @@ public void testIndexingBeforeAndAfterDataNodesStart() { // now, start a node data, and see that it gets with shards internalCluster().startNode(dataNode()); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("3") + .setLocal(true) + .get() + .isTimedOut(), equalTo(false) ); @@ -76,7 +86,9 @@ public void testShardsAllocatedAfterDataNodesStart() { new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) .waitForActiveShards(ActiveShardCount.NONE) ).actionGet(); - final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .get(); assertThat(healthResponse1.isTimedOut(), equalTo(false)); assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse1.getActiveShards(), equalTo(0)); @@ -84,7 +96,7 @@ public void testShardsAllocatedAfterDataNodesStart() { internalCluster().startNode(dataNode()); assertThat( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .setWaitForGreenStatus() @@ -100,7 +112,9 @@ public void testAutoExpandReplicasAdjustedWhenDataNodeJoins() { new CreateIndexRequest("test").settings(Settings.builder().put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")) .waitForActiveShards(ActiveShardCount.NONE) ).actionGet(); - final ClusterHealthResponse healthResponse1 = 
clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + final ClusterHealthResponse healthResponse1 = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .get(); assertThat(healthResponse1.isTimedOut(), equalTo(false)); assertThat(healthResponse1.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse1.getActiveShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 538f5e7a1640..8cdc49d3b12d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -37,7 +37,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueMillis(100)) .get() .getState() @@ -52,11 +52,27 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start master node"); final String masterNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); @@ -66,7 +82,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { try { assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueMillis(100)) .get() .getState() @@ -84,11 +100,27 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { Settings.builder().put(nonDataNode(masterNode())).put(masterDataPathSettings) ); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligibleNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligibleNodeName) ); } @@ -99,7 +131,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .setMasterNodeTimeout(TimeValue.timeValueMillis(100)) .get() .getState() @@ -114,45 
+146,112 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { logger.info("--> start master node (1)"); final String masterNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); logger.info("--> start master node (2)"); final String nextMasterEligableNodeName = internalCluster().startMasterOnlyNode(); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(masterNodeName) ); logger.info("--> closing master node (1)"); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(masterNodeName)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, masterNodeName) + ).get(); // removing the master from the voting configuration immediately triggers the master to step down assertBusy(() -> { assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); }); internalCluster().stopNode(masterNodeName); assertThat( - internalCluster().nonMasterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); assertThat( - internalCluster().masterClient().admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), + internalCluster().masterClient() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getMasterNode() + .getName(), equalTo(nextMasterEligableNodeName) ); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java index 64ac8318dce2..05b58ea2f880 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java @@ -27,7 +27,7 @@ public void testUpdateSettingsValidation() throws Exception { createIndex("test"); NumShards test = getNumShards("test"); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("test") + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test") .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("3") .setWaitForGreenStatus() @@ -36,7 +36,10 @@ public void testUpdateSettingsValidation() throws Exception { assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.totalNumShards)); setReplicaCount(0, "test"); - healthResponse = clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.numPrimaries)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index 71418cb83deb..36d903205f05 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -71,7 +71,7 @@ public void testSimpleAwareness() throws Exception { // On slow machines the initial relocation might be delayed assertBusy(() -> { logger.info("--> waiting for no relocation"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test1", "test2") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -82,7 +82,7 @@ public void testSimpleAwareness() throws Exception { assertThat("Cluster health request timed out", clusterHealth.isTimedOut(), equalTo(false)); logger.info("--> checking current state"); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); // check that closed indices are effectively closed final List<String> notClosedIndices = indicesToClose.stream() @@ -115,7 +115,7 @@ public void testAwarenessZones() { String A_1 = nodes.get(3); logger.info("--> waiting for nodes to form a cluster"); - ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("4").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("4").get(); assertThat(health.isTimedOut(), equalTo(false)); createIndex("test", 5, 1); @@ -125,7 +125,7 @@ public void testAwarenessZones() { } logger.info("--> waiting for shards to be allocated"); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test")
.setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -133,7 +133,7 @@ public void testAwarenessZones() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Map<String, Integer> counts = computeShardCounts(clusterState); assertThat(counts.get(A_1), anyOf(equalTo(2), equalTo(3))); @@ -162,7 +162,7 @@ public void testAwarenessZonesIncrementalNodes() { assertAcked(indicesAdmin().prepareClose("test")); } - ClusterHealthResponse health = clusterAdmin().prepareHealth() + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -170,7 +170,7 @@ public void testAwarenessZonesIncrementalNodes() { .setWaitForNoRelocatingShards(true) .get(); assertThat(health.isTimedOut(), equalTo(false)); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Map<String, Integer> counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -178,7 +178,7 @@ public void testAwarenessZonesIncrementalNodes() { logger.info("--> starting another node in zone 'b'"); String B_1 = internalCluster().startNode(Settings.builder().put(commonSettings).put("node.attr.zone", "b").build()); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -186,7 +186,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); ClusterRerouteUtils.reroute(client()); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -196,7 +196,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -204,7 +204,7 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(B_1), equalTo(2)); String noZoneNode = internalCluster().startNode(); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -212,7 +212,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); ClusterRerouteUtils.reroute(client()); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -222,7 +222,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(5)); @@ -230,7 +230,7 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(B_1), equalTo(2));
assertThat(counts.containsKey(noZoneNode), equalTo(false)); updateClusterSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "")); - health = clusterAdmin().prepareHealth() + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -240,7 +240,7 @@ public void testAwarenessZonesIncrementalNodes() { .get(); assertThat(health.isTimedOut(), equalTo(false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); counts = computeShardCounts(clusterState); assertThat(counts.get(A_0), equalTo(3)); @@ -254,7 +254,8 @@ public void testForceAwarenessSettingValidation() { final IllegalArgumentException illegalArgumentException = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")) ); assertThat(illegalArgumentException.getMessage(), containsString("[cluster.routing.allocation.awareness.force.]")); assertThat(illegalArgumentException.getCause(), instanceOf(SettingsException.class)); @@ -263,7 +264,8 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.not_values]") ); @@ -271,7 +273,8 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.values.junk]") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index dc93aaa81401..da585d1bb67d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -96,7 +96,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, *under dry_run*"); @@ -115,7 +115,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { ); logger.info("--> get the state, verify nothing changed because of the dry run"); - state = 
clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, actually allocating, no dry run"); @@ -132,7 +132,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { equalTo(ShardRoutingState.INITIALIZING) ); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() @@ -140,7 +140,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), @@ -165,7 +165,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { equalTo(ShardRoutingState.INITIALIZING) ); - healthResponse = clusterAdmin().prepareHealth() + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() @@ -174,7 +174,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary moved from node1 to node2"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), @@ -209,7 +209,7 @@ public void testDelayWithALargeAmountOfShards() throws Exception { internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(4)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("4").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("4").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create indices"); @@ -239,7 +239,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc String node_1 = internalCluster().startNode(commonSettings); internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(2)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate"); @@ -253,7 +253,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc indicesAdmin().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); 
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2)); logger.info("--> explicitly allocate shard 1, actually allocating, no dry run"); @@ -270,7 +270,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc equalTo(ShardRoutingState.INITIALIZING) ); - healthResponse = clusterAdmin().prepareHealth() + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test") .setWaitForEvents(Priority.LANGUID) .setWaitForYellowStatus() @@ -278,7 +278,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); assertThat( state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), @@ -306,7 +306,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc // TODO can we get around this? the cluster is RED, so what do we wait for? ClusterRerouteUtils.reroute(client()); assertThat( - clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").get().getStatus(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setIndices("test").setWaitForNodes("2").get().getStatus(), equalTo(ClusterHealthStatus.RED) ); logger.info("--> explicitly allocate primary"); @@ -326,7 +326,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc logger.info("--> get the state, verify shard 1 primary allocated"); final String nodeToCheck = node_1; assertBusy(() -> { - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); String nodeId = clusterState.nodes().resolveNode(nodeToCheck).getId(); assertThat(clusterState.getRoutingNodes().node(nodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); }); @@ -339,7 +339,7 @@ public void testRerouteExplain() { String node_1 = internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(1)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("1").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> create an index with 1 shard"); @@ -356,7 +356,7 @@ public void testRerouteExplain() { logger.info("--> starting a second node"); String node_2 = internalCluster().startNode(commonSettings); assertThat(cluster().size(), equalTo(2)); - healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> try to move the shard from node1 to node2"); @@ -385,12 +385,12 @@ public void testMessageLogging() { final String nodeName1 = internalCluster().startNode(settings); assertThat(cluster().size(), equalTo(1)); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("1").get(); + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("1").get(); assertThat(healthResponse.isTimedOut(), 
equalTo(false)); final String nodeName2 = internalCluster().startNode(settings); assertThat(cluster().size(), equalTo(2)); - healthResponse = clusterAdmin().prepareHealth().setWaitForNodes("2").get(); + healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); final String indexName = "test_index"; @@ -474,7 +474,7 @@ public void testClusterRerouteWithBlocks() { ensureGreen("test-blocks"); logger.info("--> check that the index has 1 shard"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List<ShardRouting> shards = state.routingTable().allShards("test-blocks"); assertThat(shards, hasSize(1)); @@ -504,7 +504,7 @@ public void testClusterRerouteWithBlocks() { new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)) ); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices("test-blocks") .setWaitForYellowStatus() .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index 5f54b32ab4a1..abce3ce30fba 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -66,7 +66,7 @@ public void testDecommissionNodeNoReplicas() { ensureGreen("test"); logger.info("--> verify all are allocated on node1 now"); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); @@ -93,7 +93,7 @@ public void testAutoExpandReplicasToFilteredNodes() { logger.info("--> creating an index with auto-expand replicas"); createIndex("test", Settings.builder().put(AutoExpandReplicas.SETTING.getKey(), "0-all").build()); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(1)); ensureGreen("test"); @@ -106,7 +106,7 @@ public void testAutoExpandReplicasToFilteredNodes() { ensureGreen("test"); logger.info("--> verify all are allocated on node1 now"); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index("test").getNumberOfReplicas(), equalTo(0)); for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { @@ -142,7 +142,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); } - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test"); int numShardsOnNode1 = 0; for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { @@ -165,7 +165,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); logger.info("--> verify all shards are allocated on node_1 now"); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); indexRoutingTable = clusterState.routingTable().index("test"); for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { final IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); @@ -180,7 +180,7 @@ public void testDisablingAllocationFiltering() { ensureGreen("test"); logger.info("--> verify that there are shards allocated on both nodes now"); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2)); } @@ -193,7 +193,7 @@ public void testInvalidIPFilterClusterSettings() { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1.")) ); assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); @@ -221,12 +221,12 @@ public void testTransientSettingsStillApplied() { .build(); logger.info("--> updating settings"); - clusterAdmin().prepareUpdateSettings().setTransientSettings(exclude).get(); + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(exclude).get(); logger.info("--> waiting for relocation"); waitForRelocation(ClusterHealthStatus.GREEN); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (ShardRouting shard : RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED)) { String node = state.getRoutingNodes().node(shard.currentNodeId()).node().getName(); @@ -243,12 +243,15 @@ public void testTransientSettingsStillApplied() { Settings other = Settings.builder().put("cluster.info.update.interval", "45s").build(); logger.info("--> updating settings with random persistent setting"); - clusterAdmin().prepareUpdateSettings().setPersistentSettings(other).setTransientSettings(exclude).get(); + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(other) + .setTransientSettings(exclude) + .get(); logger.info("--> waiting for relocation"); waitForRelocation(ClusterHealthStatus.GREEN); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); // The transient settings still exist in the state assertThat(state.metadata().transientSettings(), equalTo(exclude)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java index f66430871c9d..6a3d2f2fe521 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java @@ -33,7 +33,7 @@ public void testSaneAllocation() { } ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { if (node.isEmpty() == false) { @@ -42,7 +42,7 @@ public void testSaneAllocation() { } setReplicaCount(0, "test"); ensureGreen("test"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { @@ -60,7 +60,7 @@ public void testSaneAllocation() { setReplicaCount(1, "test"); ensureGreen("test"); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0)); for (RoutingNode node : state.getRoutingNodes()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java index 97112b97cc13..eebd059ed13b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java @@ -33,7 +33,13 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { private static void assertClusterUuid(boolean expectCommitted, String expectedValue) { for (String nodeName : internalCluster().getNodeNames()) { - final Metadata metadata = client(nodeName).admin().cluster().prepareState().setLocal(true).get().getState().metadata(); + final Metadata metadata = client(nodeName).admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .metadata(); assertEquals(expectCommitted, metadata.clusterUUIDCommitted()); assertEquals(expectedValue, metadata.clusterUUID()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index a20865617933..bd12f570e136 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -135,7 +135,7 @@ public void testDeleteCreateInOneBulk() throws Exception { final var dataNode = internalCluster().startDataOnlyNode(); final var dataNodeClusterService = internalCluster().clusterService(dataNode); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get().isTimedOut()); prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java index 65484066ee9b..a9948367f278 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommandIT.java @@ -46,7 +46,10 @@ public void testRemoveCustomsSuccessful() throws Exception { String node = internalCluster().startNode(); createIndex("test"); indicesAdmin().prepareDelete("test").get(); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().indexGraveyard().getTombstones().size()); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indexGraveyard().getTombstones().size() + ); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); internalCluster().stopRandomDataNode(); @@ -64,7 +67,10 @@ public void testRemoveCustomsSuccessful() throws Exception { assertThat(terminal.getOutput(), containsString("index-graveyard")); internalCluster().startNode(dataPathSettings); - assertEquals(0, clusterAdmin().prepareState().get().getState().metadata().indexGraveyard().getTombstones().size()); + assertEquals( + 0, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indexGraveyard().getTombstones().size() + ); } public void testCustomDoesNotMatch() throws Exception { @@ -72,7 +78,10 @@ public void testCustomDoesNotMatch() throws Exception { String node = internalCluster().startNode(); createIndex("test"); indicesAdmin().prepareDelete("test").get(); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().indexGraveyard().getTombstones().size()); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indexGraveyard().getTombstones().size() + ); Settings dataPathSettings = internalCluster().dataPathSettings(node); ensureStableCluster(1); internalCluster().stopRandomDataNode(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java index 560ca3e8a548..527d8b0a62fe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RemoveSettingsCommandIT.java @@ -58,7 +58,7 @@ public void testRemoveSettingsSuccessful() throws Exception { Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) ); assertThat( - clusterAdmin().prepareState().get().getState().metadata().persistentSettings().keySet(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().persistentSettings().keySet(), contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey()) ); Settings dataPathSettings = internalCluster().dataPathSettings(node); @@ -84,7 +84,7 @@ public void testRemoveSettingsSuccessful() throws Exception { internalCluster().startNode(dataPathSettings); assertThat( - clusterAdmin().prepareState().get().getState().metadata().persistentSettings().keySet(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().persistentSettings().keySet(), 
not(contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey())) ); } @@ -96,7 +96,7 @@ public void testSettingDoesNotMatch() throws Exception { Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) ); assertThat( - clusterAdmin().prepareState().get().getState().metadata().persistentSettings().keySet(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().persistentSettings().keySet(), contains(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey()) ); Settings dataPathSettings = internalCluster().dataPathSettings(node); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 00e171a7a132..2c1ca5866fa4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -137,7 +137,7 @@ public void testBootstrapNotBootstrappedCluster() throws Exception { .build() ); assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().setLocal(true).get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -242,7 +242,13 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> ensure NO_MASTER_BLOCK on data-only node"); assertBusy(() -> { - ClusterState state = internalCluster().client(dataNode).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = internalCluster().client(dataNode) + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); @@ -288,7 +294,13 @@ public void test3MasterNodes2Failed() throws Exception { logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state"); assertBusy(() -> { - ClusterState state = internalCluster().client(dataNode2).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = internalCluster().client(dataNode2) + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertFalse(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); assertTrue(state.metadata().persistentSettings().getAsBoolean(UnsafeBootstrapMasterCommand.UNSAFE_BOOTSTRAP.getKey(), false)); }); @@ -333,7 +345,13 @@ public void testNoInitialBootstrapAfterDetach() throws Exception { .build() ); - ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = internalCluster().client() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); internalCluster().stopNode(node); @@ -345,7 +363,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata( Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode); 
updateClusterSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb")); - ClusterState state = internalCluster().client().admin().cluster().prepareState().get().getState(); + ClusterState state = internalCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); internalCluster().stopCurrentMasterNode(); @@ -359,7 +377,7 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata( internalCluster().startMasterOnlyNode(masterNodeDataPathSettings); ensureGreen(); - state = internalCluster().client().admin().cluster().prepareState().get().getState(); + state = internalCluster().client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java index b0cc81bf3481..6e21c3622ec4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java @@ -43,8 +43,11 @@ public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionExcept final String originalMaster = internalCluster().getMasterName(); logger.info("--> excluding master node {}", originalMaster); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(originalMaster)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, originalMaster) + ).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get(); assertNotEquals(originalMaster, internalCluster().getMasterName()); } @@ -60,7 +63,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { internalCluster().client() .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes("4") .setWaitForEvents(Priority.LANGUID) .get() @@ -71,7 +74,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { final ClusterState clusterState = internalCluster().client() .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setMetadata(true) @@ -111,7 +114,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { internalCluster().client() .admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNodes("3") .setWaitForEvents(Priority.LANGUID) .get() @@ -121,7 +124,7 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { final ClusterState newClusterState = internalCluster().client() .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .setMetadata(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index 9b117365777c..ea127a735291 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -35,7 +35,7 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep internalCluster().startNodes(2, masterNodeSettings); Settings dateNodeSettings = dataNode(); internalCluster().startNodes(2, dateNodeSettings); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("4") .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java index 8cee57ee34b8..69923c787a05 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/AllocationIdIT.java @@ -111,7 +111,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale // allocation fails due to corruption marker assertBusy(() -> { - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final ShardRouting shardRouting = state.routingTable().index(indexName).shard(shardId.id()).primaryShard(); assertThat(shardRouting.state(), equalTo(ShardRoutingState.UNASSIGNED)); assertThat(shardRouting.unassignedInfo().reason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); @@ -143,7 +143,9 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale } public void checkHealthStatus(String indexName, ClusterHealthStatus healthStatus) { - final ClusterHealthStatus indexHealthStatus = clusterAdmin().health(new ClusterHealthRequest(indexName)).actionGet().getStatus(); + final ClusterHealthStatus indexHealthStatus = clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, indexName)) + .actionGet() + .getStatus(); assertThat(indexHealthStatus, is(healthStatus)); } @@ -169,7 +171,7 @@ private Path getIndexPath(String nodeName, ShardId shardId) { } private Set<String> getAllocationIds(String indexName) { - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); return state.metadata().index(indexName).inSyncAllocationIds(0); } @@ -181,7 +183,14 @@ private IndexSettings getIndexSettings(String indexName, String nodeName) { private String historyUUID(String node, String indexName) { final ShardStats[] shards = client(node).admin().indices().prepareStats(indexName).clear().get().getShards(); - final String nodeId = client(node).admin().cluster().prepareState().get().getState().nodes().resolveNode(node).getId(); + final String nodeId = client(node).admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .resolveNode(node) + .getId(); assertThat(shards.length, greaterThan(0)); final Set<String> historyUUIDs = Arrays.stream(shards) .filter(shard -> shard.getShardRouting().currentNodeId().equals(nodeId)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index 543b0be8ae48..8a2f5d749ff2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -32,7 +32,7 @@ public void testNoDelayedTimeout() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopNode(findNodeWithShard()); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(0)); ensureGreen("test"); } @@ -53,9 +53,12 @@ public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception { Settings nodeWithShardDataPathSettings = internalCluster().dataPathSettings(nodeWithShard); internalCluster().stopNode(nodeWithShard); assertBusy( - () -> assertThat(clusterAdmin().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState().getRoutingNodes().unassigned().size() > 0, + equalTo(true) + ) ); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); internalCluster().startNode(nodeWithShardDataPathSettings); // this will use the same data location as the stopped node ensureGreen("test"); } @@ -97,16 +100,19 @@ public void testDelayedAllocationChangeWithSettingTo100ms() throws Exception { indexRandomData(); internalCluster().stopNode(findNodeWithShard()); assertBusy( - () -> assertThat(clusterAdmin().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState().getRoutingNodes().unassigned().size() > 0, + equalTo(true) + ) ); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); logger.info("Setting shorter allocation delay"); updateIndexSettings( Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100)), "test" ); ensureGreen("test"); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(0)); } /** @@ -123,15 +129,18 @@ public void testDelayedAllocationChangeWithSettingTo0() throws Exception { indexRandomData(); internalCluster().stopNode(findNodeWithShard()); assertBusy( - () -> assertThat(clusterAdmin().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true)) + () -> assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState().getRoutingNodes().unassigned().size() > 0, + equalTo(true) + ) ); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(1)); updateIndexSettings( Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0)), "test" ); 
ensureGreen("test"); - assertThat(clusterAdmin().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getDelayedUnassignedShards(), equalTo(0)); } private void indexRandomData() throws Exception { @@ -147,7 +156,7 @@ private void indexRandomData() throws Exception { } private String findNodeWithShard() { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List<ShardRouting> startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); return state.nodes().get(randomFrom(startedShards).currentNodeId()).getName(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index d97063454920..9a13470eea25 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -118,7 +118,7 @@ public void testBulkWeirdScenario() throws Exception { private Settings createStaleReplicaScenario(String master) throws Exception { prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); refresh(); - ClusterState state = clusterAdmin().prepareState().all().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); List<ShardRouting> shards = state.routingTable().allShards("test"); assertThat(shards.size(), equalTo(2)); @@ -164,7 +164,10 @@ private Settings createStaleReplicaScenario(String master) throws Exception { ); // kick reroute a second time and check that all shards are unassigned ClusterRerouteUtils.reroute(client(master)); - assertThat(client(master).admin().cluster().prepareState().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + assertThat( + client(master).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getRoutingNodes().unassigned().size(), + equalTo(2) + ); return inSyncDataPathSettings; } @@ -197,7 +200,7 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce internalCluster().stopNode(dataNodeWithShardCopy); ensureStableCluster(1); assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingTable() @@ -223,11 +226,11 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce logger.info("--> wait until shard is failed and becomes unassigned again"); assertTrue( - clusterAdmin().prepareState().get().getState().toString(), - clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().toString(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned() ); assertThat( - clusterAdmin().prepareState() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingTable() @@ -302,7 +305,14 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { // search can throw an "all shards failed" exception. We will wait until the shard initialization has completed before // verifying the search hit count.
assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().routingTable().index(idxName).allPrimaryShardsActive()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .routingTable() + .index(idxName) + .allPrimaryShardsActive() + ) ); } ShardStats[] shardStats = indicesAdmin().prepareStats("test") @@ -313,7 +323,7 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { assertThat(shardStat.getCommitStats().getNumDocs(), equalTo(useStaleReplica ? 1 : 0)); } // allocation id of old primary was cleaned from the in-sync set - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals( Collections.singleton(state.routingTable().index(idxName).shard(0).primary.allocationId().getId()), @@ -402,7 +412,15 @@ public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() t .setSettings(indexSettings(1, 0).put("index.routing.allocation.exclude._name", node)) .get(); - assertThat(clusterAdmin().prepareState().get().getState().getRoutingTable().shardRoutingTable("test", 0).assignedShards(), empty()); + assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .shardRoutingTable("test", 0) + .assignedShards(), + empty() + ); ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)); ensureGreen("test"); @@ -419,7 +437,10 @@ public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception { final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopNode(replicaNode); ensureYellow("test"); - assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); + assertEquals( + 2, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() + ); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -428,9 +449,19 @@ public boolean clearData(String nodeName) { }); logger.info("--> wait until shard is failed and becomes unassigned again"); assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .index("test") + .allPrimaryShardsUnassigned() + ) + ); + assertEquals( + 2, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() ); - assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(inSyncDataPathSettings); @@ -448,10 +479,16 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopNode(replicaNode); ensureYellow("test"); - assertEquals(2, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); + assertEquals( + 2, + 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() + ); logger.info("--> indexing..."); prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() + ); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -460,14 +497,31 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { }); logger.info("--> wait until shard is failed and becomes unassigned again"); assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .index("test") + .allPrimaryShardsUnassigned() + ) + ); + assertEquals( + 1, + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").inSyncAllocationIds(0).size() ); - assertEquals(1, clusterAdmin().prepareState().get().getState().metadata().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(inSyncDataPathSettings); assertBusy( - () -> assertTrue(clusterAdmin().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned()) + () -> assertTrue( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .getRoutingTable() + .index("test") + .allPrimaryShardsUnassigned() + ) ); } @@ -506,7 +560,13 @@ public void testForceAllocatePrimaryOnNoDecision() throws Exception { ensureGreen(indexName); assertEquals( 1, - clusterAdmin().prepareState().get().getState().routingTable().index(indexName).shardsWithState(ShardRoutingState.STARTED).size() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .routingTable() + .index(indexName) + .shardsWithState(ShardRoutingState.STARTED) + .size() ); } @@ -547,7 +607,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { internalCluster().stopNode(oldPrimary); // Checks that we fail replicas on one side but do not mark them as stale.
assertBusy(() -> { - ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + ClusterState state = client(master).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shardId); final String newPrimaryNode = state.getRoutingNodes().node(shardRoutingTable.primary.currentNodeId()).node().getName(); assertThat(newPrimaryNode, not(equalTo(oldPrimary))); @@ -563,7 +623,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { partition.ensureHealthy(internalCluster()); logger.info("--> stop disrupting network and re-enable allocation"); assertBusy(() -> { - ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + ClusterState state = client(master).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.routingTable().shardRoutingTable(shardId).activeShards(), hasSize(numberOfReplicas)); assertThat(state.metadata().index("test").inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1)); for (String node : replicaNodes) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java index 57c4c0986a79..de61c6cf566c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/RemoveReplicaPriorityIT.java @@ -52,7 +52,7 @@ public void testReplicaRemovalPriority() throws Exception { }); } - final String dataNodeIdFilter = clusterAdmin().prepareState() + final String dataNodeIdFilter = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setNodes(true) .get() @@ -74,7 +74,7 @@ public void testReplicaRemovalPriority() throws Exception { ); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -90,7 +90,7 @@ public void testReplicaRemovalPriority() throws Exception { updateIndexSettings(Settings.builder().putNull(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._id"), INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -107,7 +107,7 @@ public void testReplicaRemovalPriority() throws Exception { setReplicaCount(2, INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -125,7 +125,7 @@ public void testReplicaRemovalPriority() throws Exception { setReplicaCount(1, INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -143,7 +143,7 @@ public void testReplicaRemovalPriority() throws Exception { setReplicaCount(0, INDEX_NAME); assertBusy(() -> { - final IndexShardRoutingTable 
indexShardRoutingTable = clusterAdmin().prepareState() + final IndexShardRoutingTable indexShardRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 85a04ee6f185..556836736a9f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -296,7 +296,7 @@ public void testShardCreation() throws Exception { createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); - final var clusterState = clusterAdmin().prepareState().clear().setRoutingTable(true).get().getState(); + final var clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setRoutingTable(true).get().getState(); // verify non-DEFAULT roles reported in cluster state XContent assertRolesInRoutingTableXContent(clusterState); @@ -440,7 +440,7 @@ public void testPromotion() { @Nullable public AllocationCommand getCancelPrimaryCommand() { - final var indexRoutingTable = clusterAdmin().prepareState() + final var indexRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -488,7 +488,7 @@ public void testSearchRouting() throws Exception { assertEngineTypes(); final var searchShardProfileKeys = new HashSet<String>(); - final var indexRoutingTable = clusterAdmin().prepareState() + final var indexRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java index 671c308f98fb..6b97a8b6f3ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/AllocationFailuresResetIT.java @@ -48,7 +48,7 @@ private void removeAllocationFailuresInjection(String node) { private void awaitShardAllocMaxRetries() throws Exception { var maxRetries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(internalCluster().getDefaultSettings()); assertBusy(() -> { - var state = clusterAdmin().prepareState().get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); var index = state.getRoutingTable().index(INDEX); assertNotNull(index); var shard = index.shard(SHARD).primaryShard(); @@ -61,7 +61,7 @@ private void awaitShardAllocMaxRetries() throws Exception { private void awaitShardAllocSucceed() throws Exception { assertBusy(() -> { - var state = clusterAdmin().prepareState().get().getState(); + var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); var index = state.getRoutingTable().index(INDEX); assertNotNull(index); var shard = index.shard(SHARD).primaryShard(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java index eb62ad5e6eec..5509f46786a8 100644 ---
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java @@ -86,7 +86,7 @@ public void testFloodStageExceeded() throws Exception { final String newDataNodeName = internalCluster().startDataOnlyNode(); final String newDataNodeId = clusterAdmin().prepareNodesInfo(newDataNodeName).get().getNodes().get(0).getNode().getId(); assertBusy(() -> { - final ShardRouting primaryShard = clusterAdmin().prepareState() + final ShardRouting primaryShard = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .setNodes(true) @@ -103,7 +103,7 @@ public void testFloodStageExceeded() throws Exception { // Verify that the block is removed once the shard migration is complete refreshClusterInfo(); - assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); assertNull(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE)); } @@ -135,7 +135,7 @@ public void testRemoveExistingIndexBlocksWhenDiskThresholdMonitorIsDisabled() th // Verify that the block is removed refreshClusterInfo(); - assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); assertNull(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE)); // Re-enable and the blocks should be back! @@ -143,7 +143,7 @@ public void testRemoveExistingIndexBlocksWhenDiskThresholdMonitorIsDisabled() th Settings.builder().put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) ); refreshClusterInfo(); - assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); assertThat(getIndexBlock(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE), equalTo("true")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java index e3b6f2ddba4c..2f4b3588cf56 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java @@ -28,7 +28,7 @@ public void testPrimaryFailureIncreasesTerm() throws Exception { logger.info("--> disabling allocation to capture shard failure"); disableAllocation("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final int shard = randomBoolean() ? 0 : 1; final String nodeId = state.routingTable().index("test").shard(shard).primaryShard().currentNodeId(); final String node = state.nodes().get(nodeId).getName(); @@ -38,7 +38,12 @@ public void testPrimaryFailureIncreasesTerm() throws Exception { logger.info("--> waiting for a yellow index"); // we can't use ensureYellow since that one is just as happy with a GREEN status. 
- assertBusy(() -> assertThat(clusterAdmin().prepareHealth("test").get().getStatus(), equalTo(ClusterHealthStatus.YELLOW))); + assertBusy( + () -> assertThat( + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").get().getStatus(), + equalTo(ClusterHealthStatus.YELLOW) + ) + ); final long term0 = shard == 0 ? 2 : 1; final long term1 = shard == 1 ? 2 : 1; @@ -53,7 +58,7 @@ public void testPrimaryFailureIncreasesTerm() throws Exception { protected void assertPrimaryTerms(long shard0Term, long shard1Term) { for (String node : internalCluster().getNodeNames()) { logger.debug("--> asserting primary terms on [{}]", node); - ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = client(node).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); IndexMetadata metadata = state.metadata().index("test"); assertThat(metadata.primaryTerm(0), equalTo(shard0Term)); assertThat(metadata.primaryTerm(1), equalTo(shard1Term)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index a1a29468cc5b..106fd9530c3a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -231,7 +231,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard private Set<ShardId> getShardIds(final String nodeId, final String indexName) { final Set<ShardId> shardIds = new HashSet<>(); - final IndexRoutingTable indexRoutingTable = clusterAdmin().prepareState() + final IndexRoutingTable indexRoutingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .clear() .setRoutingTable(true) .get() @@ -319,7 +319,7 @@ private void refreshDiskUsage() { } assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setWaitForNoInitializingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 7464f83cb281..fd7e9f8fb357 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -75,7 +75,7 @@ public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); } - final List<String> nodeIds = clusterAdmin().prepareState() + final List<String> nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -153,7 +153,7 @@ public void testAutomaticReleaseOfIndexBlock() throws Exception { internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); } - final List<String> nodeIds = clusterAdmin().prepareState() + final List<String> nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -211,7 +211,7 @@ public void
testAutomaticReleaseOfIndexBlock() throws Exception { () -> assertBlocked(prepareIndex("test").setId("1").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK) ); - assertFalse(clusterAdmin().prepareHealth("test").setWaitForEvents(Priority.LANGUID).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForEvents(Priority.LANGUID).get().isTimedOut()); // Cannot add further documents assertBlocked(prepareIndex("test").setId("2").setSource("foo", "bar"), IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK); @@ -261,7 +261,7 @@ public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() throws Exception .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms") ); - final List<String> nodeIds = clusterAdmin().prepareState() + final List<String> nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -318,7 +318,7 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); - final List<String> nodeIds = clusterAdmin().prepareState() + final List<String> nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -414,7 +414,7 @@ public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception .put(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms") ); - final List<String> nodeIds = clusterAdmin().prepareState() + final List<String> nodeIds = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) .get() .getState() .getRoutingNodes() @@ -483,7 +483,7 @@ public void testMovesShardsOffSpecificDataPathAboveWatermark() throws Exception private Map<String, Integer> getShardCountByNodeId() { final Map<String, Integer> shardCountByNodeId = new HashMap<>(); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (final RoutingNode node : clusterState.getRoutingNodes()) { logger.info( "----> node {} has {} shards", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java index 921ed3265f1b..be530f0bd4cb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/UpdateShardAllocationSettingsIT.java @@ -100,7 +100,7 @@ public void testUpdateSameHostSetting() { updateClusterSettings(Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), true)); final String indexName = "idx"; createIndex(indexName, 1, 1); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse( "replica should be unassigned", clusterState.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty() @@ -109,7 +109,7 @@ public void testUpdateSameHostSetting() { // the same host - the replica should get assigned updateClusterSettings(Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), false)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState =
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertTrue( "all shards should be assigned", clusterState.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index a142d594fe06..a9767cce318d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -43,7 +43,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { @After public void cleanup() throws Exception { assertAcked( - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull("*")) .setTransientSettings(Settings.builder().putNull("*")) ); @@ -64,7 +64,7 @@ private void testClusterNonExistingSettingsUpdate( String key1 = "no_idea_what_you_are_talking_about"; int value1 = 10; try { - ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept(Settings.builder().put(key1, value1), builder); builder.get(); @@ -95,7 +95,7 @@ private void testDeleteIsAppliedFirst( final Setting<Integer> INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; final Setting<TimeValue> REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING; - ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 7).put(REROUTE_INTERVAL.getKey(), "42s"), builder); ClusterUpdateSettingsResponse response = builder.get(); @@ -106,7 +106,7 @@ private void testDeleteIsAppliedFirst( assertThat(REROUTE_INTERVAL.get(settingsFunction.apply(response)), equalTo(TimeValue.timeValueSeconds(42))); assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(42))); - ClusterUpdateSettingsRequestBuilder undoBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder undoBuilder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept( Settings.builder().putNull((randomBoolean() ?
"cluster.routing.*" : "*")).put(REROUTE_INTERVAL.getKey(), "43s"), undoBuilder @@ -124,7 +124,7 @@ public void testResetClusterTransientSetting() { final Setting INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; final Setting REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING; - ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 7).build()) .get(); @@ -132,7 +132,7 @@ public void testResetClusterTransientSetting() { assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(7)); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(7)); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().putNull(INITIAL_RECOVERIES.getKey())) .get(); @@ -140,7 +140,7 @@ public void testResetClusterTransientSetting() { assertNull(response.getTransientSettings().get(INITIAL_RECOVERIES.getKey())); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 8).put(REROUTE_INTERVAL.getKey(), "43s").build()) .get(); @@ -149,7 +149,7 @@ public void testResetClusterTransientSetting() { assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(8)); assertThat(REROUTE_INTERVAL.get(response.getTransientSettings()), equalTo(TimeValue.timeValueSeconds(43))); assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(43))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().putNull((randomBoolean() ? 
"cluster.routing.*" : "*"))) .get(); @@ -164,7 +164,7 @@ public void testResetClusterPersistentSetting() { final Setting INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; final Setting REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING; - ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 9).build()) .get(); @@ -172,7 +172,7 @@ public void testResetClusterPersistentSetting() { assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(9)); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(9)); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull(INITIAL_RECOVERIES.getKey())) .get(); @@ -180,7 +180,7 @@ public void testResetClusterPersistentSetting() { assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY))); assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 10).put(REROUTE_INTERVAL.getKey(), "44s").build()) .get(); @@ -189,7 +189,7 @@ public void testResetClusterPersistentSetting() { assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(10)); assertThat(REROUTE_INTERVAL.get(response.getPersistentSettings()), equalTo(TimeValue.timeValueSeconds(44))); assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(44))); - response = clusterAdmin().prepareUpdateSettings() + response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull((randomBoolean() ? 
"cluster.routing.*" : "*"))) .get(); @@ -209,7 +209,7 @@ public void testClusterSettingsUpdateResponse() { Settings transientSettings1 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).build(); Settings persistentSettings1 = Settings.builder().put(key2, value2).build(); - ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings1) .setPersistentSettings(persistentSettings1) .get(); @@ -223,7 +223,7 @@ public void testClusterSettingsUpdateResponse() { Settings transientSettings2 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).put(key2, value2).build(); Settings persistentSettings2 = Settings.EMPTY; - ClusterUpdateSettingsResponse response2 = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response2 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings2) .setPersistentSettings(persistentSettings2) .get(); @@ -237,7 +237,7 @@ public void testClusterSettingsUpdateResponse() { Settings transientSettings3 = Settings.EMPTY; Settings persistentSettings3 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).put(key2, value2).build(); - ClusterUpdateSettingsResponse response3 = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response3 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings3) .setPersistentSettings(persistentSettings3) .get(); @@ -267,7 +267,7 @@ private void testCanUpdateTracerSettings( final BiConsumer consumer, final Function settingsFunction ) { - ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); consumer.accept( Settings.builder().putList("transport.tracer.include", "internal:index/shard/recovery/*", "internal:gateway/local*"), builder @@ -300,7 +300,10 @@ private void testUpdateSettings( ) { final Setting INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; - ClusterUpdateSettingsRequestBuilder initialBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder initialBuilder = clusterAdmin().prepareUpdateSettings( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 42), initialBuilder); ClusterUpdateSettingsResponse response = initialBuilder.get(); @@ -310,7 +313,10 @@ private void testUpdateSettings( assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(42)); try { - ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT + ); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), "whatever"), badBuilder); badBuilder.get(); fail("bogus value"); @@ -321,7 +327,10 @@ private void testUpdateSettings( assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(42)); try { - ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings(); + ClusterUpdateSettingsRequestBuilder badBuilder = clusterAdmin().prepareUpdateSettings( + TEST_REQUEST_TIMEOUT, 
+ TEST_REQUEST_TIMEOUT + ); consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), -1), badBuilder); badBuilder.get(); fail("bogus value"); @@ -346,9 +355,13 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO if (readOnlyAllowDelete) { settingsBuilder.put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), "true"); } - assertAcked(clusterAdmin().prepareUpdateSettings().setPersistentSettings(settingsBuilder).setTransientSettings(settingsBuilder)); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settingsBuilder) + .setTransientSettings(settingsBuilder) + ); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); if (readOnly) { assertTrue(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().transientSettings())); assertTrue(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().persistentSettings())); @@ -365,7 +378,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO .build(); restartNodesOnBrokenClusterState(ClusterState.builder(state).metadata(brokenMeta)); ensureGreen(); // wait for state recovery - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertTrue(state.getMetadata().persistentSettings().getAsBoolean("archived.this.is.unknown", false)); // cannot remove read only block due to archived settings @@ -375,7 +388,9 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); final IllegalArgumentException e1 = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(builder) + .setTransientSettings(builder) ); assertTrue(e1.getMessage().contains("unknown setting [archived.this.is.unknown]")); } @@ -383,7 +398,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO // fail to clear archived settings with non-archived settings final ClusterBlockException e2 = expectThrows( ClusterBlockException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable")) .setTransientSettings(Settings.builder().putNull("archived.*")) ); @@ -397,7 +412,8 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO // fail to clear archived settings due to cluster read only block final ClusterBlockException e3 = expectThrows( ClusterBlockException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("archived.*")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().putNull("archived.*")) ); if (readOnly) { assertTrue(e3.getMessage().contains("cluster read-only (api)")); @@ -419,7 +435,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO } final ClusterBlockException e4 = expectThrows( ClusterBlockException.class, - 
clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(builder) ); if (readOnly) { assertTrue(e4.getMessage().contains("cluster read-only (api)")); @@ -436,7 +452,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); final ClusterBlockException e5 = expectThrows( ClusterBlockException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setPersistentSettings(builder) ); if (readOnly) { assertTrue(e5.getMessage().contains("cluster read-only (api)")); @@ -450,9 +466,14 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO Settings.Builder builder = Settings.builder().putNull("archived.*"); clearOrSetFalse(builder, readOnly, Metadata.SETTING_READ_ONLY_SETTING); clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); - assertAcked(clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder).get()); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(builder) + .setTransientSettings(builder) + .get() + ); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().transientSettings())); assertFalse(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(state.getMetadata().transientSettings())); assertFalse(Metadata.SETTING_READ_ONLY_SETTING.get(state.getMetadata().persistentSettings())); @@ -477,7 +498,7 @@ public void testClusterUpdateSettingsWithBlocks() { String key2 = "cluster.routing.allocation.node_concurrent_recoveries"; Settings persistentSettings = Settings.builder().put(key2, "5").build(); - ClusterUpdateSettingsRequestBuilder request = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsRequestBuilder request = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(transientSettings) .setPersistentSettings(persistentSettings); @@ -488,7 +509,9 @@ public void testClusterUpdateSettingsWithBlocks() { // But it's possible to update the settings to update the "cluster.blocks.read_only" setting Settings settings = Settings.builder().putNull(Metadata.SETTING_READ_ONLY_SETTING.getKey()).build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings).get()); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings).get() + ); } finally { setClusterReadOnly(false); @@ -498,12 +521,14 @@ public void testClusterUpdateSettingsWithBlocks() { try { // But it's possible to update the settings to update the "cluster.blocks.read_only" setting Settings settings = Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(settings).get()); + assertAcked( + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(settings).get() + ); assertBlocked(request, Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK); } finally { // But it's 
possible to update the settings to update the "cluster.blocks.read_only" setting Settings s = Settings.builder().putNull(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey()).build(); - assertAcked(clusterAdmin().prepareUpdateSettings().setTransientSettings(s).get()); + assertAcked(clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setTransientSettings(s).get()); } // It should work now @@ -541,7 +566,7 @@ private void testLoggerLevelUpdate(final BiConsumer<Settings.Builder, ClusterUpdateSettingsRequestBuilder> consumer logger.info("--> updating cluster settings"); var future = client(masterNode).admin() .cluster() - .prepareUpdateSettings() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(BlockingClusterSettingTestPlugin.TEST_BLOCKING_SETTING.getKey(), true).build()) .setMasterNodeTimeout(TimeValue.timeValueMillis(100L)) .execute(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java index 895bd6932fdb..7e9406dfcf09 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java @@ -88,7 +88,7 @@ public void testMultipleIndicesAllocation() { .addAliasAction(AliasActions.add().index("test1").alias("routing_alias").routing("ABC")) .addAliasAction(AliasActions.add().index("test2").alias("routing_alias").routing("EFG")) .get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); ClusterSearchShardsResponse response = safeExecute(new ClusterSearchShardsRequest(TEST_REQUEST_TIMEOUT, "routing_alias")); assertThat(response.getGroups().length, equalTo(2)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 31dd002a6af7..7a66cb3abb7c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -49,11 +49,11 @@ public void testMinimumPerNode() { int negativeShardsPerNode = between(-50_000, 0); try { if (frequently()) { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build()) .get(); } else { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build()) .get(); } @@ -67,7 +67,7 @@ public void testMinimumPerNode() { } public void testIndexCreationOverLimit() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); @@ -95,12 +95,12 @@ public void testIndexCreationOverLimit() { } catch (IllegalArgumentException e) { verifyException(dataNodes, counts, e); } - ClusterState clusterState =
clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } public void testIndexCreationOverLimitFromTemplate() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); final ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); @@ -126,12 +126,12 @@ public void testIndexCreationOverLimitFromTemplate() { final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareCreate("should-fail")); verifyException(dataNodes, counts, e); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } public void testIncreaseReplicasOverLimit() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); dataNodes = ensureMultipleDataNodes(dataNodes); @@ -158,12 +158,12 @@ public void testIncreaseReplicasOverLimit() { + ";"; assertEquals(expectedError, e.getMessage()); } - Metadata clusterState = clusterAdmin().prepareState().get().getState().metadata(); + Metadata clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); assertEquals(0, clusterState.index("growing-should-fail").getNumberOfReplicas()); } public void testChangingMultipleIndicesOverLimit() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); dataNodes = ensureMultipleDataNodes(dataNodes); @@ -219,13 +219,13 @@ public void testChangingMultipleIndicesOverLimit() { + ";"; assertEquals(expectedError, e.getMessage()); } - Metadata clusterState = clusterAdmin().prepareState().get().getState().metadata(); + Metadata clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata(); assertEquals(firstIndexReplicas, clusterState.index("test-1-index").getNumberOfReplicas()); assertEquals(secondIndexReplicas, clusterState.index("test-2-index").getNumberOfReplicas()); } public void testPreserveExistingSkipsCheck() { - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); dataNodes = ensureMultipleDataNodes(dataNodes); @@ -245,7 +245,7 @@ public void testPreserveExistingSkipsCheck() { .setPreserveExisting(true) .setSettings(Settings.builder().put("number_of_replicas", dataNodes)) ); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals(0, clusterState.getMetadata().index("test-index").getNumberOfReplicas()); } @@ -266,7 +266,7 @@ public void testRestoreSnapshotOverLimit() { .setSettings(repoSettings.build()) ); - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); createIndex( "snapshot-index", @@ -330,13 +330,13 @@ public void testRestoreSnapshotOverLimit() { verifyException(dataNodes, counts, e); } ensureGreen(); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } public void testOpenIndexOverLimit() { Client client = client(); - int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + int dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); createIndex( @@ -348,7 +348,7 @@ public void testOpenIndexOverLimit() { .build() ); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertFalse(healthResponse.isTimedOut()); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test-index-1").get(); @@ -371,7 +371,7 @@ public void testOpenIndexOverLimit() { } catch (IllegalArgumentException e) { verifyException(dataNodes, counts, e); } - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); + ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } @@ -379,17 +379,22 @@ private int ensureMultipleDataNodes(int dataNodes) { if (dataNodes == 1) { internalCluster().startNode(dataNode()); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").setLocal(true).get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(">=2") + .setLocal(true) + .get() + .isTimedOut(), equalTo(false) ); - dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); + dataNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes().getDataNodes().size(); } return dataNodes; } private void setShardsPerNode(int shardsPerNode) { try { - ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings() + ClusterUpdateSettingsResponse response = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build()) .get(); assertEquals(shardsPerNode, response.getPersistentSettings().getAsInt(shardsPerNodeKey, -1).intValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index a0fa63aa58ab..6ae7e0f7e84e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -293,7 +293,14 @@ public void testRejoinDocumentExistsInAllShardCopies() throws Exception { NetworkDisruption scheme = addRandomDisruptionType(partitions); 
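// --- Illustration (editorial annotation, not part of the PR diff) ---------------------
// The hunks above repeat one mechanical change: test-client calls that used to fall back
// to default timeouts now receive them explicitly. A minimal sketch of the recurring
// shape, assuming it sits in an ESIntegTestCase subclass where TEST_REQUEST_TIMEOUT is
// the inherited TimeValue constant; the two arguments to prepareUpdateSettings are
// presumably the master-node timeout and the ack timeout:
//
//     import org.elasticsearch.common.settings.Settings;
//
//     private void putPersistentSetting(String key, int value) { // hypothetical helper
//         clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
//             .setPersistentSettings(Settings.builder().put(key, value).build())
//             .get();
//     }
// ---------------------------------------------------------------------------------------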
        scheme.startDisrupting();
        ensureStableCluster(2, notIsolatedNode);
-        assertFalse(client(notIsolatedNode).admin().cluster().prepareHealth("test").setWaitForYellowStatus().get().isTimedOut());
+        assertFalse(
+            client(notIsolatedNode).admin()
+                .cluster()
+                .prepareHealth(TEST_REQUEST_TIMEOUT, "test")
+                .setWaitForYellowStatus()
+                .get()
+                .isTimedOut()
+        );
        DocWriteResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test").setSource("field", "value").get();
        assertThat(indexResponse.getVersion(), equalTo(1L));
@@ -424,12 +431,12 @@ public boolean validateClusterForming() {
        });
        assertBusy(() -> {
-            assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut());
+            assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).get().isTimedOut());
            assertTrue(
                internalCluster().client(masterNode)
                    .admin()
                    .cluster()
-                    .prepareHealth()
+                    .prepareHealth(TEST_REQUEST_TIMEOUT)
                    .setWaitForNodes("2")
                    .setTimeout(TimeValue.timeValueSeconds(2))
                    .get()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
index cad5c8f524bc..b512f369c76d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java
@@ -145,7 +145,7 @@ public void testElectMasterWithLatestVersion() throws Exception {
        isolateAllNodes.stopDisrupting();
-        final ClusterState state = clusterAdmin().prepareState().get().getState();
+        final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        if (state.metadata().hasIndex("test") == false) {
            fail("index 'test' was lost. current cluster state: " + state);
        }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java
index f8bdf17e2cec..601266b50d23 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java
@@ -158,7 +158,13 @@ private String xContentToString(ChunkedToXContent xContent) throws IOException {
    private void ensureNoMaster(String node) throws Exception {
        assertBusy(
            () -> assertNull(
-                client(node).admin().cluster().state(new ClusterStateRequest().local(true)).get().getState().nodes().getMasterNode()
+                client(node).admin()
+                    .cluster()
+                    .state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).local(true))
+                    .get()
+                    .getState()
+                    .nodes()
+                    .getMasterNode()
            )
        );
    }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java
index 3aa97f79a82d..eeda0257fb0e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/document/ShardInfoIT.java
@@ -113,12 +113,14 @@ private void assertShardInfo(ReplicationResponse response, int expectedTotal, in
    private void ensureActiveShardCopies(final int shardId, final int copyCount) throws Exception {
        assertBusy(() -> {
-            ClusterState state = clusterAdmin().prepareState().get().getState();
+            ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
            assertThat(state.routingTable().index("idx"), not(nullValue()));
            assertThat(state.routingTable().index("idx").shard(shardId), not(nullValue()));
            assertThat(state.routingTable().index("idx").shard(shardId).activeShards().size(), equalTo(copyCount));
-            ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth("idx").setWaitForNoRelocatingShards(true).get();
+            ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx")
+                .setWaitForNoRelocatingShards(true)
+                .get();
            assertThat(healthResponse.isTimedOut(), equalTo(false));
            RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries("idx").setActiveOnly(true).get();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java
index 8c6abc3e14cd..00a4f170cf7e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java
@@ -153,7 +153,7 @@ public void testUpgradeDataFolder() throws IOException, InterruptedException {
        String node = internalCluster().startNode();
        prepareCreate("test").get();
        indexRandom(true, prepareIndex("test").setId("1").setSource("{}", XContentType.JSON));
-        String nodeId = clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId();
+        String nodeId = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNodeId();
        final Settings dataPathSettings = internalCluster().dataPathSettings(node);
        internalCluster().stopRandomDataNode();
@@ -235,7 +235,7 @@ public void testUpgradeDataFolder() throws IOException, InterruptedException {
        dataPaths.forEach(path -> assertTrue(Files.isDirectory(path.resolve("nodes"))));
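// --- Illustration (editorial annotation, not part of the PR diff) ---------------------
// prepareState() call sites get the same treatment: the timeout becomes the single
// builder argument. A sketch under the same ESIntegTestCase assumptions:
//
//     import org.elasticsearch.cluster.ClusterState;
//
//     private String currentMasterNodeId() { // hypothetical helper
//         ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
//         return state.nodes().getMasterNodeId();
//     }
// ---------------------------------------------------------------------------------------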
internalCluster().startNode(dataPathSettings); dataPaths.forEach(path -> assertTrue(Files.isRegularFile(path.resolve("nodes")))); - assertEquals(nodeId, clusterAdmin().prepareState().get().getState().nodes().getMasterNodeId()); + assertEquals(nodeId, clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNodeId()); assertTrue(indexExists("test")); ensureYellow("test"); assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java index 24bf198b7b42..a695c46bcbfa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/features/ClusterFeaturesIT.java @@ -32,7 +32,7 @@ public void testClusterHasFeatures() { assertThat(service.getNodeFeatures(), hasKey(FeatureService.FEATURES_SUPPORTED.id())); // check the nodes all have a feature in their cluster state (there should always be features_supported) - var response = clusterAdmin().state(new ClusterStateRequest().clear().nodes(true)).actionGet(); + var response = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).clear().nodes(true)).actionGet(); var features = response.getState().clusterFeatures().nodeFeatures(); Set missing = features.entrySet() .stream() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index e05bda69d2c9..92c1e9729b46 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -92,7 +92,7 @@ public void testMappingMetadataParsed() throws Exception { .get(); logger.info("--> verify meta _routing required exists"); - MappingMetadata mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); + MappingMetadata mappingMd = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); logger.info("--> restarting nodes..."); @@ -102,7 +102,7 @@ public void testMappingMetadataParsed() throws Exception { ensureYellow(); logger.info("--> verify meta _routing required exists"); - mappingMd = clusterAdmin().prepareState().get().getState().metadata().index("test").mapping(); + mappingMd = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").mapping(); assertThat(mappingMd.routingRequired(), equalTo(true)); } @@ -118,7 +118,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for green status"); ensureGreen(); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -132,7 +132,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -158,7 +158,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> verifying that the state is green"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -172,7 +172,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -181,7 +181,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for two nodes and green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -199,7 +199,7 @@ public void testSimpleOpenClose() throws Exception { logger.info("--> waiting for green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(test.numPrimaries)); assertThat( @@ -233,11 +233,14 @@ public Settings onNodeStopped(String nodeName) { }); logger.info("--> waiting for test index to be created"); - ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setIndices("test") + .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify we have an index"); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setIndices("test").get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setIndices("test").get(); assertThat(clusterStateResponse.getState().metadata().hasIndex("test"), equalTo(true)); } @@ -264,7 +267,7 @@ public void testTwoNodesSingleDoc() throws Exception { prepareIndex("test").setId("1").setSource("field1", "value1").setRefreshPolicy(IMMEDIATE).get(); logger.info("--> waiting for green status"); - ClusterHealthResponse health = clusterAdmin().prepareHealth() + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForNodes("2") @@ -279,7 +282,7 @@ public void testTwoNodesSingleDoc() throws Exception { logger.info("--> closing 
test index..."); assertAcked(indicesAdmin().prepareClose("test")); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -287,7 +290,11 @@ public void testTwoNodesSingleDoc() throws Exception { indicesAdmin().prepareOpen("test").get(); logger.info("--> waiting for green status"); - health = clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").get(); + health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); assertThat(health.isTimedOut(), equalTo(false)); logger.info("--> verify 1 doc in the index"); @@ -337,7 +344,8 @@ public Settings onNodeStopped(final String nodeName) throws Exception { logger.info("--> wait until all nodes are back online"); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForEvents(Priority.LANGUID).waitForNodes(Integer.toString(numNodes)) + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForEvents(Priority.LANGUID) + .waitForNodes(Integer.toString(numNodes)) ).actionGet(); logger.info("--> waiting for green status"); @@ -372,13 +380,13 @@ public void testRecoverBrokenIndexMetadata() throws Exception { } else { internalCluster().startNode(); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata metadata = state.getMetadata().index("test"); final IndexMetadata.Builder brokenMeta = IndexMetadata.builder(metadata) @@ -395,7 +403,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { // check that the cluster does not keep reallocating shards assertBusy(() -> { - final RoutingTable routingTable = clusterAdmin().prepareState().get().getState().routingTable(); + final RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable(); final IndexRoutingTable indexRoutingTable = routingTable.index("test"); assertNotNull(indexRoutingTable); for (int i = 0; i < indexRoutingTable.size(); i++) { @@ -410,7 +418,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { }, 60, TimeUnit.SECONDS); indicesAdmin().prepareClose("test").get(); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals(IndexMetadata.State.CLOSE, state.getMetadata().index(metadata.getIndex()).getState()); assertEquals("boolean", state.getMetadata().index(metadata.getIndex()).getSettings().get("archived.index.similarity.BM25.type")); // try to open it with the broken setting - fail again! 
@@ -449,13 +457,13 @@ public void testRecoverMissingAnalyzer() throws Exception { } else { internalCluster().startNode(); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final IndexMetadata metadata = state.getMetadata().index("test"); final IndexMetadata.Builder brokenMeta = IndexMetadata.builder(metadata) @@ -464,7 +472,7 @@ public void testRecoverMissingAnalyzer() throws Exception { // check that the cluster does not keep reallocating shards assertBusy(() -> { - final RoutingTable routingTable = clusterAdmin().prepareState().get().getState().routingTable(); + final RoutingTable routingTable = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable(); final IndexRoutingTable indexRoutingTable = routingTable.index("test"); assertNotNull(indexRoutingTable); for (int i = 0; i < indexRoutingTable.size(); i++) { @@ -497,13 +505,13 @@ public void testArchiveBrokenClusterSettings() throws Exception { } else { internalCluster().startNode(); clusterAdmin().health( - new ClusterHealthRequest(new String[] {}).waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) .waitForNodes("2") ).actionGet(); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final Metadata metadata = state.getMetadata(); final Metadata brokenMeta = Metadata.builder(metadata) @@ -518,7 +526,7 @@ public void testArchiveBrokenClusterSettings() throws Exception { restartNodesOnBrokenClusterState(ClusterState.builder(state).metadata(brokenMeta)); ensureYellow("test"); // wait for state recovery - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertEquals("true", state.metadata().persistentSettings().get("archived.this.is.unknown")); assertEquals( "broken", @@ -528,7 +536,7 @@ public void testArchiveBrokenClusterSettings() throws Exception { // delete these settings updateClusterSettings(Settings.builder().putNull("archived.*")); - state = clusterAdmin().prepareState().get().getState(); + state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNull(state.metadata().persistentSettings().get("archived.this.is.unknown")); assertNull( state.metadata().persistentSettings().get("archived." 
+ ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java index 1e34967073ad..94824db66d15 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java @@ -69,7 +69,7 @@ public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Excepti logger.debug("relocating index..."); updateIndexSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2), index); - clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNoRelocatingShards(true).get(); ensureGreen(); assertIndexDirectoryDeleted(node1, resolveIndex); assertIndexInMetaState(node2, index); @@ -98,7 +98,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { logger.info("--> close index"); indicesAdmin().prepareClose(index).get(); // close the index - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(clusterStateResponse.getState().getMetadata().index(index).getState().name(), equalTo(IndexMetadata.State.CLOSE.name())); // update the mapping. this should cause the new meta data to be written although index is closed diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java index 15a72e3534b5..f1b06e4efc97 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -62,7 +62,7 @@ public void doAfterNodes(int numNodes, final Client activeClient) throws Excepti ClusterHealthResponse clusterHealth = activeClient.admin() .cluster() .health( - new ClusterHealthRequest(new String[] {}).waitForYellowStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForYellowStatus() .waitForNodes("2") .waitForActiveShards(test.numPrimaries * 2) ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index b55dd5e207c4..4281562b6479 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -35,7 +35,7 @@ public Set waitForNoBlocksOnNode(TimeValue timeout, Client nodeCli do { blocks = nodeClient.admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .setLocal(true) .get() .getState() @@ -55,33 +55,75 @@ public void testRecoverAfterDataNodes() { logger.info("--> start master_node (1)"); Client master1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(masterOnlyNode())); assertThat( - master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + 
.global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); logger.info("--> start data_node (1)"); Client data1 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(dataOnlyNode())); assertThat( - master1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + data1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); logger.info("--> start master_node (2)"); Client master2 = startNode(Settings.builder().put(RECOVER_AFTER_DATA_NODES_SETTING.getKey(), 2).put(masterOnlyNode())); assertThat( - master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master2.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - data1.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + data1.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - master2.admin().cluster().prepareState().setLocal(true).get().getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), + master2.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .setLocal(true) + .get() + .getState() + .blocks() + .global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 26573644790f..193f025e6843 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -142,7 +142,7 @@ private Map assertAndCapturePrimaryTerms(Map pre previousTerms = new HashMap<>(); } final Map result = new HashMap<>(); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (IndexMetadata indexMetadata : state.metadata().indices().values()) { final String index = indexMetadata.getIndex().getName(); final long[] previous = previousTerms.get(index); @@ -316,7 +316,10 @@ public void testTwoNodeFirstNodeCleared() throws Exception { Map primaryTerms = assertAndCapturePrimaryTerms(null); - client().execute(TransportAddVotingConfigExclusionsAction.TYPE, new AddVotingConfigExclusionsRequest(firstNode)).get(); + client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, firstNode) + ).get(); 
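// --- Illustration (editorial annotation, not part of the PR diff) ---------------------
// One-shot request constructors follow the same convention, timeout first. The add half
// of the voting-config exclusion round-trip appears just above and the matching clear
// just below; together they presumably look like this (nodeName is a placeholder):
//
//     client().execute(
//         TransportAddVotingConfigExclusionsAction.TYPE,
//         new AddVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT, nodeName)
//     ).get();
//     // ... restart or reshape the cluster ...
//     client().execute(
//         TransportClearVotingConfigExclusionsAction.TYPE,
//         new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT)
//     ).get();
// ---------------------------------------------------------------------------------------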
internalCluster().fullRestart(new RestartCallback() { @Override @@ -342,7 +345,8 @@ public boolean clearData(String nodeName) { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 2); } - client().execute(TransportClearVotingConfigExclusionsAction.TYPE, new ClearVotingConfigExclusionsRequest()).get(); + client().execute(TransportClearVotingConfigExclusionsAction.TYPE, new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT)) + .get(); } public void testLatestVersionLoaded() throws Exception { @@ -364,7 +368,7 @@ public void testLatestVersionLoaded() throws Exception { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 2); } - String metadataUuid = clusterAdmin().prepareState().execute().get().getState().getMetadata().clusterUUID(); + String metadataUuid = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().getMetadata().clusterUUID(); assertThat(metadataUuid, not(equalTo("_na_"))); logger.info("--> closing first node, and indexing more data to the second node"); @@ -420,13 +424,16 @@ public void testLatestVersionLoaded() throws Exception { logger.info("--> running cluster_health (wait for the shards to startup)"); ensureGreen(); - assertThat(clusterAdmin().prepareState().execute().get().getState().getMetadata().clusterUUID(), equalTo(metadataUuid)); + assertThat( + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().getMetadata().clusterUUID(), + equalTo(metadataUuid) + ); for (int i = 0; i < 10; i++) { assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 3); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().templates().get("template_1").patterns(), equalTo(Collections.singletonList("te*"))); assertThat(state.metadata().index("test").getAliases().get("test_alias"), notNullValue()); assertThat(state.metadata().index("test").getAliases().get("test_alias").filter(), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index ae0a1e15923e..02c566f49e2b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -202,7 +202,7 @@ public void testRecentPrimaryInformation() throws Exception { ); internalCluster().startDataOnlyNode(nodeWithReplicaSettings); // need to wait for events to ensure the reroute has happened since we perform it async when a new node joins. 
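// --- Illustration (editorial annotation, not part of the PR diff) ---------------------
// The hunk just below shows the two-argument health builder: timeout first, then the
// index to wait on. Sketch, with indexName standing in for any test index:
//
//     clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName)
//         .setWaitForYellowStatus()
//         .setWaitForEvents(Priority.LANGUID)
//         .get();
// ---------------------------------------------------------------------------------------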
- clusterAdmin().prepareHealth(indexName).setWaitForYellowStatus().setWaitForEvents(Priority.LANGUID).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).setWaitForYellowStatus().setWaitForEvents(Priority.LANGUID).get(); blockRecovery.countDown(); ensureGreen(indexName); assertThat(internalCluster().nodesInclude(indexName), hasItem(newNode)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java index 660d6028486a..30fc7e263a4c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthMetadataServiceIT.java @@ -253,7 +253,7 @@ private static void updateSettings(InternalTestCluster internalCluster, Settings internalCluster.client() .admin() .cluster() - .updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(settings)) + .updateSettings(new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings(settings)) .actionGet(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java index 14697cc6533c..a5931f29b9ff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java @@ -88,7 +88,7 @@ private void waitForAllNodesToReportHealth() throws Exception { ClusterState state = internalCluster().client() .admin() .cluster() - .prepareState() + .prepareState(TEST_REQUEST_TIMEOUT) .clear() .setMetadata(true) .setNodes(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java index 5a5fad9be3ef..b6477a7e1a6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/UpdateHealthInfoCacheIT.java @@ -136,13 +136,22 @@ private void decreasePollingInterval(InternalTestCluster internalCluster) { .admin() .cluster() .updateSettings( - new ClusterUpdateSettingsRequest().persistentSettings( + new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings( Settings.builder().put(LocalHealthMonitor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(10)) ) ); } private static Map getNodes(InternalTestCluster internalCluster) { - return internalCluster.client().admin().cluster().prepareState().clear().setNodes(true).get().getState().getNodes().getNodes(); + return internalCluster.client() + .admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get() + .getState() + .getNodes() + .getNodes(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index 216d5e25218e..026bb00d69bb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -12,12 +12,8 @@ import org.elasticsearch.action.get.GetRequestBuilder; import org.elasticsearch.action.get.GetResponse; import 
org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; -import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; @@ -28,7 +24,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentType; import org.junit.After; import java.io.IOException; @@ -56,10 +51,9 @@ protected Collection> nodePlugins() { public void cleanUpPipelines() { indicesAdmin().prepareDelete("*").get(); - final GetPipelineResponse response = clusterAdmin().prepareGetPipeline("default_pipeline", "final_pipeline", "request_pipeline") - .get(); + final GetPipelineResponse response = getPipelines("default_pipeline", "final_pipeline", "request_pipeline"); for (final PipelineConfiguration pipeline : response.pipelines()) { - clusterAdmin().deletePipeline(new DeletePipelineRequest(pipeline.getId())).actionGet(); + deletePipeline(pipeline.getId()); } } @@ -67,9 +61,8 @@ public void testFinalPipelineCantChangeDestination() { final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); - final BytesReference finalPipelineBody = new BytesArray(""" + putJsonPipeline("final_pipeline", """ {"processors": [{"changing_dest": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final IllegalStateException e = expectThrows( IllegalStateException.class, @@ -87,9 +80,8 @@ public void testFinalPipelineCantRerouteDestination() { final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); - final BytesReference finalPipelineBody = new BytesArray(""" + putJsonPipeline("final_pipeline", """ {"processors": [{"reroute": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final IllegalStateException e = expectThrows( IllegalStateException.class, @@ -110,13 +102,11 @@ public void testFinalPipelineOfOldDestinationIsNotInvoked() { .build(); createIndex("index", settings); - BytesReference defaultPipelineBody = new BytesArray(""" + putJsonPipeline("default_pipeline", """ {"processors": [{"changing_dest": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)).actionGet(); - BytesReference finalPipelineBody = new BytesArray(""" + putJsonPipeline("final_pipeline", """ {"processors": [{"final": {"exists":"no_such_field"}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) @@ -136,13 +126,11 @@ public void testFinalPipelineOfNewDestinationIsInvoked() { settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("target", settings); - BytesReference defaultPipelineBody = new BytesArray(""" + 
putJsonPipeline("default_pipeline", """ {"processors": [{"changing_dest": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)).actionGet(); - BytesReference finalPipelineBody = new BytesArray(""" + putJsonPipeline("final_pipeline", """ {"processors": [{"final": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) @@ -162,13 +150,11 @@ public void testDefaultPipelineOfNewDestinationIsNotInvoked() { settings = Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "target_default_pipeline").build(); createIndex("target", settings); - BytesReference defaultPipelineBody = new BytesArray(""" + putJsonPipeline("default_pipeline", """ {"processors": [{"changing_dest": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)).actionGet(); - BytesReference targetPipeline = new BytesArray(""" + putJsonPipeline("target_default_pipeline", """ {"processors": [{"final": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)).actionGet(); DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) @@ -188,13 +174,11 @@ public void testDefaultPipelineOfRerouteDestinationIsInvoked() { settings = Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "target_default_pipeline").build(); createIndex("target", settings); - BytesReference defaultPipelineBody = new BytesArray(""" + putJsonPipeline("default_pipeline", """ {"processors": [{"reroute": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)).actionGet(); - BytesReference targetPipeline = new BytesArray(""" + putJsonPipeline("target_default_pipeline", """ {"processors": [{"final": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)).actionGet(); DocWriteResponse indexResponse = prepareIndex("index").setId("1") .setSource(Map.of("field", "value")) @@ -214,13 +198,11 @@ public void testAvoidIndexingLoop() { settings = Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "target_default_pipeline").build(); createIndex("target", settings); - BytesReference defaultPipelineBody = new BytesArray(""" + putJsonPipeline("default_pipeline", """ {"processors": [{"reroute": {"dest": "target"}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)).actionGet(); - BytesReference targetPipeline = new BytesArray(""" + putJsonPipeline("target_default_pipeline", """ {"processors": [{"reroute": {"dest": "index"}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("target_default_pipeline", targetPipeline, XContentType.JSON)).actionGet(); IllegalStateException exception = expectThrows( IllegalStateException.class, @@ -245,12 +227,10 @@ public void testFinalPipeline() { } public void testRequestPipelineAndFinalPipeline() { - final BytesReference requestPipelineBody = new BytesArray(""" + putJsonPipeline("request_pipeline", """ {"processors": [{"request": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("request_pipeline", requestPipelineBody, XContentType.JSON)).actionGet(); - final 
BytesReference finalPipelineBody = new BytesArray(""" + putJsonPipeline("final_pipeline", """ {"processors": [{"final": {"exists":"request"}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final Settings settings = Settings.builder().put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline").build(); createIndex("index", settings); final IndexRequestBuilder index = prepareIndex("index").setId("1"); @@ -270,12 +250,10 @@ public void testRequestPipelineAndFinalPipeline() { } public void testDefaultAndFinalPipeline() { - final BytesReference defaultPipelineBody = new BytesArray(""" + putJsonPipeline("default_pipeline", """ {"processors": [{"default": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)).actionGet(); - final BytesReference finalPipelineBody = new BytesArray(""" + putJsonPipeline("final_pipeline", """ {"processors": [{"final": {"exists":"default"}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final Settings settings = Settings.builder() .put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline") .put(IndexSettings.FINAL_PIPELINE.getKey(), "final_pipeline") @@ -297,12 +275,10 @@ public void testDefaultAndFinalPipeline() { } public void testDefaultAndFinalPipelineFromTemplates() { - final BytesReference defaultPipelineBody = new BytesArray(""" + putJsonPipeline("default_pipeline", """ {"processors": [{"default": {}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("default_pipeline", defaultPipelineBody, XContentType.JSON)).actionGet(); - final BytesReference finalPipelineBody = new BytesArray(""" + putJsonPipeline("final_pipeline", """ {"processors": [{"final": {"exists":"default"}}]}"""); - clusterAdmin().putPipeline(new PutPipelineRequest("final_pipeline", finalPipelineBody, XContentType.JSON)).actionGet(); final int lowOrder = randomIntBetween(0, Integer.MAX_VALUE - 1); final int highOrder = randomIntBetween(lowOrder + 1, Integer.MAX_VALUE); final int finalPipelineOrder; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java index da89f3252bec..97dd3d9723d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexingPressureIT.java @@ -396,7 +396,15 @@ public Settings onNodeStopped(String nodeName) { } private String getCoordinatingOnlyNode() { - return clusterAdmin().prepareState().get().getState().nodes().getCoordinatingOnlyNodes().values().iterator().next().getName(); + return clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getCoordinatingOnlyNodes() + .values() + .iterator() + .next() + .getName(); } private Tuple getPrimaryReplicaNodeNames() { @@ -413,7 +421,7 @@ private Tuple getPrimaryReplicaNodeNames() { .findAny() .get() .currentNodeId(); - DiscoveryNodes nodes = clusterAdmin().prepareState().get().getState().nodes(); + DiscoveryNodes nodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes(); String primaryName = nodes.get(primaryId).getName(); String replicaName = nodes.get(replicaId).getName(); return new Tuple<>(primaryName, replicaName); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java index 53f632f6ba8d..749cf73e822e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/GlobalCheckpointSyncIT.java @@ -84,7 +84,7 @@ public void testPostOperationGlobalCheckpointSync() throws Exception { public void testBackgroundGlobalCheckpointSync() throws Exception { runGlobalCheckpointSyncTest(TimeValue.timeValueSeconds(randomIntBetween(1, 3)), client -> { // prevent global checkpoint syncs between all nodes - final DiscoveryNodes nodes = client.admin().cluster().prepareState().get().getState().getNodes(); + final DiscoveryNodes nodes = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes(); for (final DiscoveryNode node : nodes) { for (final DiscoveryNode other : nodes) { if (node == other) { @@ -105,7 +105,7 @@ public void testBackgroundGlobalCheckpointSync() throws Exception { } }, client -> { // restore global checkpoint syncs between all nodes - final DiscoveryNodes nodes = client.admin().cluster().prepareState().get().getState().getNodes(); + final DiscoveryNodes nodes = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().getNodes(); for (final DiscoveryNode node : nodes) { for (final DiscoveryNode other : nodes) { if (node == other) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index c9906ccf1fbe..5a4785252bf9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -558,7 +558,7 @@ private void runWaitForShardsTest( .build(); assertAcked(prepareCreate("index").setSettings(settings)); ensureYellowAndNoInitializingShards("index"); - assertFalse(clusterAdmin().prepareHealth("index").setWaitForActiveShards(numDataNodes).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "index").setWaitForActiveShards(numDataNodes).get().isTimedOut()); final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index bdfe629f4bab..b7dbcf42630e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -194,7 +194,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { waitNoPendingTasksOnAll(); String nodeId = null; - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DiscoveryNodes nodes = state.nodes(); for (Map.Entry cursor : nodes.getNodes().entrySet()) { final String name = cursor.getValue().getName(); @@ 
-350,7 +350,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { }); String primaryNodeId = null; - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DiscoveryNodes nodes = state.nodes(); for (Map.Entry cursor : nodes.getNodes().entrySet()) { final String name = cursor.getValue().getName(); @@ -524,7 +524,7 @@ public void testResolvePath() throws Exception { ensureGreen(indexName); final Map nodeNameToNodeId = new HashMap<>(); - final ClusterState state = clusterAdmin().prepareState().get().getState(); + final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); final DiscoveryNodes nodes = state.nodes(); for (Map.Entry cursor : nodes.getNodes().entrySet()) { nodeNameToNodeId.put(cursor.getValue().getName(), cursor.getKey()); @@ -569,7 +569,7 @@ public void testResolvePath() throws Exception { } private Path getPathToShardData(String indexName, String dirSuffix) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator shardIterators = state.getRoutingTable() .activePrimaryShardsGrouped(new String[] { indexName }, false); List iterators = iterableAsArrayList(shardIterators); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 4bd8fadc9309..f3b888022127 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -180,14 +180,18 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException */ setReplicaCount(2, "test"); ClusterHealthResponse health = clusterAdmin().health( - new ClusterHealthRequest("test").waitForGreenStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test").waitForGreenStatus() // sometimes due to cluster rebalancing and random settings default timeout is just not enough. 
                .masterNodeTimeout(TimeValue.timeValueMinutes(5))
                .timeout(TimeValue.timeValueMinutes(5))
                .waitForNoRelocatingShards(true)
        ).actionGet();
        if (health.isTimedOut()) {
-            logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks());
+            logger.info(
+                "cluster state:\n{}\n{}",
+                clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(),
+                getClusterPendingTasks()
+            );
            assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
        }
        assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
@@ -288,18 +292,24 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted
        ClusterRerouteUtils.reroute(client());
        boolean didClusterTurnRed = waitUntil(() -> {
-            ClusterHealthStatus test = clusterAdmin().health(new ClusterHealthRequest("test")).actionGet().getStatus();
+            ClusterHealthStatus test = clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test"))
+                .actionGet()
+                .getStatus();
            return test == ClusterHealthStatus.RED;
        }, 5, TimeUnit.MINUTES);// sometimes on slow nodes the replication / recovery is just dead slow
-        final ClusterHealthResponse response = clusterAdmin().health(new ClusterHealthRequest("test")).get();
+        final ClusterHealthResponse response = clusterAdmin().health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test")).get();
        if (response.getStatus() != ClusterHealthStatus.RED) {
            logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed);
-            logger.info("cluster state:\n{}\n{}", clusterAdmin().prepareState().get().getState(), getClusterPendingTasks());
+            logger.info(
+                "cluster state:\n{}\n{}",
+                clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(),
+                getClusterPendingTasks()
+            );
        }
        assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable()
            .activePrimaryShardsGrouped(new String[] { "test" }, false);
        for (ShardIterator iterator : shardIterators) {
@@ -509,7 +519,7 @@ public void onTimeout(TimeValue timeout) {
    }
    private void assertThatAllShards(String index, Consumer<IndexShardRoutingTable> verifier) {
-        var clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().routingTable(true)).actionGet();
+        var clusterStateResponse = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).routingTable(true)).actionGet();
        var indexRoutingTable = clusterStateResponse.getState().getRoutingTable().index(index);
        for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) {
            verifier.accept(indexRoutingTable.shard(shardId));
@@ -655,7 +665,7 @@ public void testReplicaCorruption() throws Exception {
    }
    private int numShards(String... index) {
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(index, false);
        return shardIterators.size();
    }
@@ -682,7 +692,7 @@ private ShardRouting corruptRandomPrimaryFile() throws IOException {
    }
    private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException {
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        Index test = state.metadata().index("test").getIndex();
        GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable()
            .activePrimaryShardsGrouped(new String[] { "test" }, false);
@@ -738,7 +748,7 @@ private static boolean isPerSegmentFile(String fileName) {
    public List<Path> listShardFiles(ShardRouting routing) throws IOException {
        NodesStatsResponse nodeStatses = clusterAdmin().prepareNodesStats(routing.currentNodeId()).setFs(true).get();
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        final Index test = state.metadata().index("test").getIndex();
        assertThat(routing.toString(), nodeStatses.getNodes().size(), equalTo(1));
        List<Path> files = new ArrayList<>();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java
index 143ffedeefc5..45b9091ab255 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java
@@ -144,7 +144,7 @@ private SearchRequestBuilder addSuggestions(SearchRequestBuilder request, int i)
    }
    private Set<String> nodeIdsWithIndex(String... indices) {
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        GroupShardsIterator<ShardIterator> allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
        Set<String> nodes = new HashSet<>();
        for (ShardIterator shardIterator : allAssignedShardsGrouped) {
@@ -159,7 +159,7 @@ private Set<String> nodeIdsWithIndex(String... indices) {
    }
    protected int numAssignedShards(String... indices) {
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        GroupShardsIterator<ShardIterator> allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
        return allAssignedShardsGrouped.size();
    }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java
index 5c4cdc8cde85..8ea707c0a26b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionIT.java
@@ -58,7 +58,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
        CreateIndexResponse createIndexResponse = indicesAdmin().create(new CreateIndexRequest("test").settings(settings)).actionGet();
        assertAcked(createIndexResponse);
-        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
+        ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
        assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
@@ -68,7 +68,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
        // first wait for 2 nodes in the cluster
        logger.info("Waiting for replicas to be assigned");
-        ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth()
+        ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
            .setWaitForGreenStatus()
            .setWaitForNodes("2")
            .setWaitForNoRelocatingShards(true)
@@ -83,7 +83,9 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
        ClusterRerouteUtils.reroute(client());
        clusterHealth = clusterAdmin().health(
-            new ClusterHealthRequest(new String[] {}).waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true)
+            new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForGreenStatus()
+                .waitForNodes("2")
+                .waitForNoRelocatingShards(true)
        ).actionGet();
        assertThat(clusterHealth.isTimedOut(), equalTo(false));
        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
@@ -94,7 +96,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
        assertThat(clusterHealth.getActiveShards(), equalTo(22));
        assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
-        clusterState = clusterAdmin().prepareState().get().getState();
+        clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
        assertNodesPresent(clusterState.getRoutingNodes(), node1, node2);
        routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
        assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
@@ -109,7 +111,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
        // first wait for 3 nodes in the cluster
        logger.info("Waiting for replicas to be assigned");
-        clusterHealth = clusterAdmin().prepareHealth()
+        clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
            .setWaitForGreenStatus()
            .setWaitForNodes("3")
            .setWaitForNoRelocatingShards(true)
@@ -123,7 +125,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
        // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other
nodes join) ClusterRerouteUtils.reroute(client()); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("3") .setWaitForNoRelocatingShards(true) @@ -138,7 +140,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { assertThat(clusterHealth.getActiveShards(), equalTo(22)); assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNodesPresent(clusterState.getRoutingNodes(), node1, node2, node3); routingNodeEntry1 = clusterState.getRoutingNodes().node(node1); @@ -165,7 +167,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { internalCluster().stopNode(server_1); // verify health logger.info("Running Cluster Health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) @@ -177,7 +179,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { ClusterRerouteUtils.reroute(client()); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) @@ -189,7 +191,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { assertThat(clusterHealth.getActiveShards(), equalTo(22)); assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNodesPresent(clusterState.getRoutingNodes(), node3, node2); routingNodeEntry2 = clusterState.getRoutingNodes().node(node2); routingNodeEntry3 = clusterState.getRoutingNodes().node(node3); @@ -207,7 +209,7 @@ public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { AcknowledgedResponse deleteIndexResponse = indicesAdmin().prepareDelete("test").get(); assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true)); - clusterState = clusterAdmin().prepareState().get().getState(); + clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertNodesPresent(clusterState.getRoutingNodes(), node3, node2); routingNodeEntry2 = clusterState.getRoutingNodes().node(node2); assertThat(routingNodeEntry2.isEmpty(), equalTo(true)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java index fb22aaa3747c..69e982a30b35 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/DateMathIndexExpressionsIntegrationIT.java @@ -177,7 +177,7 @@ public void testCreateIndexWithDateMathExpression() { assertEquals(dateMathExp3, response.getSetting(index3, IndexMetadata.SETTING_INDEX_PROVIDED_NAME)); }); - ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().index(index1), notNullValue()); 
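The change repeated throughout these hunks is mechanical: the zero-argument prepareState() and prepareHealth() builders are replaced by overloads that take an explicit master-node timeout. A minimal sketch of the new call shape, assuming TEST_REQUEST_TIMEOUT is the fixed TimeValue constant supplied by the test framework (its definition is not part of this patch):

    // Sketch only: TEST_REQUEST_TIMEOUT stands in for a fixed TimeValue, e.g. TimeValue.timeValueSeconds(30).
    ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) // master-node timeout is now explicit
        .get()
        .getState();
    assertNotNull(state.metadata().index("test"));
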
         assertThat(clusterState.metadata().index(index2), notNullValue());
         assertThat(clusterState.metadata().index(index3), notNullValue());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java
index e9e88a2d6b76..325cd27f0090 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java
@@ -97,7 +97,7 @@ public void beforeIndexCreated(Index index, Settings indexSettings) {
             fail("should have thrown an exception during creation");
         } catch (Exception e) {
             assertTrue(e.getMessage().contains("failing on purpose"));
-            assertFalse(clusterAdmin().prepareState().get().getState().routingTable().hasIndex("failed"));
+            assertFalse(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().routingTable().hasIndex("failed"));
         }
     }

@@ -120,7 +120,7 @@ public void beforeIndexCreated(Index index, Settings indexSettings) {
         ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("index1", 0, node1, node2));
         ensureGreen("index1");

-        var state = clusterAdmin().prepareState().get().getState();
+        var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         logger.info("Final routing is {}", state.getRoutingNodes().toString());
         var shard = state.routingTable().index("index1").shard(0).primaryShard();
         assertThat(shard, notNullValue());
@@ -148,13 +148,13 @@ public void beforeIndexCreated(Index index, Settings indexSettings) {

         // await all relocation attempts are exhausted
         assertBusy(() -> {
-            var state = clusterAdmin().prepareState().get().getState();
+            var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
             var shard = state.routingTable().index("index1").shard(0).primaryShard();
             assertThat(shard, notNullValue());
             assertThat(shard.relocationFailureInfo().failedRelocations(), equalTo(maxAttempts));
         });

         // ensure the shard remain started
-        var state = clusterAdmin().prepareState().get().getState();
+        var state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         logger.info("Final routing is {}", state.getRoutingNodes().toString());
         var shard = state.routingTable().index("index1").shard(0).primaryShard();
         assertThat(shard, notNullValue());
@@ -177,7 +177,7 @@ public void testIndexStateShardChanged() throws Throwable {
             fail("should have thrown an exception");
         } catch (ElasticsearchException e) {
             assertTrue(e.getMessage().contains("failing on purpose"));
-            ClusterStateResponse resp = clusterAdmin().prepareState().get();
+            ClusterStateResponse resp = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get();
             assertFalse(resp.getState().routingTable().indicesRouting().keySet().contains("failed"));
         }

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java
index c4be9568f8ba..1ca8fb315b09 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ResolveClusterIT.java
@@ -677,7 +677,7 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept
         assertFalse(
             client(REMOTE_CLUSTER_1).admin()
                 .cluster()
-                .prepareHealth(remoteIndex1)
+                .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex1)
                 .setWaitForYellowStatus()
                 .setTimeout(TimeValue.timeValueSeconds(10))
                 .get()
@@ -715,7 +715,7 @@ private Map setupThreeClusters(boolean useAlias) throws IOExcept
         assertFalse(
             client(REMOTE_CLUSTER_2).admin()
                 .cluster()
-                .prepareHealth(remoteIndex2)
+                .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex2)
                 .setWaitForYellowStatus()
                 .setTimeout(TimeValue.timeValueSeconds(10))
                 .get()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java
index 28e89f459055..d263a9d65889 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/cluster/ShardLockFailureIT.java
@@ -106,7 +106,7 @@ public void assertMatched() {}
         updateIndexSettings(Settings.builder().putNull(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name"), indexName);
         ensureYellow(indexName);
         assertTrue(countDownLatch.await(30, TimeUnit.SECONDS));
-        assertEquals(ClusterHealthStatus.YELLOW, clusterAdmin().prepareHealth(indexName).get().getStatus());
+        assertEquals(ClusterHealthStatus.YELLOW, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getStatus());
         mockLog.assertAllExpectationsMatched();
     }

@@ -153,7 +153,7 @@ public void testShardLockTimeout() throws Exception {
         updateIndexSettings(Settings.builder().putNull(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name"), indexName);
         assertBusy(mockLog::assertAllExpectationsMatched);

-        final var clusterHealthResponse = clusterAdmin().prepareHealth(indexName)
+        final var clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName)
             .setWaitForEvents(Priority.LANGUID)
             .setTimeout(TimeValue.timeValueSeconds(10))
             .setWaitForNoInitializingShards(true)
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java
index 937addb473f8..c80f13861e83 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.indices.mapping;

 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.UpdateForV9;
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.mapper.MapperParsingException;
@@ -33,6 +34,8 @@ protected boolean forbidPrivateIndexSettings() {
      * contains unknown parameters. We were able to create those templates in 7.x still, so we need
      * to be able to index new documents into them. Indexing should issue a deprecation warning though.
      */
+    @UpdateForV9
+    @AwaitsFix(bugUrl = "this is testing 7.x specific compatibility which may be n/a now after 9.0 bump")
     public void testBWCMalformedDynamicTemplate() {
         // this parameter is not supported by "keyword" field type
         String mapping = """
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
index 720f48754519..57f09e1ed2bb 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java
@@ -62,7 +62,7 @@ public void testSimpleGetMappings() throws Exception {
         indicesAdmin().prepareCreate("indexa").setMapping(getMappingForType()).get();
         indicesAdmin().prepareCreate("indexb").setMapping(getMappingForType()).get();

-        ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth()
+        ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForGreenStatus()
             .get();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
index 0008ec1f9cbd..3d240627cf23 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -62,7 +62,7 @@ public void testDynamicUpdates() throws Exception {
         indicesAdmin().prepareCreate("test")
             .setSettings(indexSettings(1, 0).put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE))
             .get();
-        clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+        clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
         updateClusterSettings(
             Settings.builder().put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(5))
         );
@@ -100,7 +100,7 @@ public void testUpdateMappingWithoutType() {
         indicesAdmin().prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping("""
             {"properties":{"body":{"type":"text"}}}
             """).get();
-        clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+        clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();

         AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource("""
             {"properties":{"date":{"type":"integer"}}}
@@ -115,7 +115,7 @@ public void testUpdateMappingWithoutType() {

     public void testUpdateMappingWithoutTypeMultiObjects() {
         createIndex("test", 1, 0);
-        clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+        clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();

         AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource("""
             {"properties":{"date":{"type":"integer"}}}""", XContentType.JSON).get();
@@ -131,7 +131,7 @@ public void testUpdateMappingWithConflicts() {
         indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping("""
             {"properties":{"body":{"type":"text"}}}
             """).get();
-        clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+        clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();

         try {
             indicesAdmin().preparePutMapping("test").setSource("""
@@ -163,7 +163,7 @@ public void testUpdateMappingWithNormsConflicts() {
     public void testUpdateMappingNoChanges() {
         indicesAdmin().prepareCreate("test").setSettings(indexSettings(2, 0)).setMapping("""
             {"properties":{"body":{"type":"text"}}}""").get();
-        clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();
+        clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get();

         AcknowledgedResponse putMappingResponse = indicesAdmin().preparePutMapping("test").setSource("""
             {"_doc":{"properties":{"body":{"type":"text"}}}}
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
index 705fb879e912..e547ae5a46de 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
@@ -319,7 +319,7 @@ public void testCanResetUnreasonableSettings() {
         reset();

         assertThat(
-            clusterAdmin().prepareState()
+            clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
                 .get()
                 .getState()
                 .metadata()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java
index ff2117ea93bb..56fcb3c1d123 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/HierarchyCircuitBreakerTelemetryIT.java
@@ -63,7 +63,12 @@ public void testCircuitBreakerTripCountMetric() {
         // NOTE: we start with empty circuitBreakerSettings to allow cluster formation
         masterNodeName = internalCluster().startMasterOnlyNode(Settings.EMPTY);
         dataNodeName = internalCluster().startDataOnlyNode(Settings.EMPTY);
-        assertTrue(clusterAdmin().prepareUpdateSettings().setPersistentSettings(circuitBreakerSettings).get().isAcknowledged());
+        assertTrue(
+            clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
+                .setPersistentSettings(circuitBreakerSettings)
+                .get()
+                .isAcknowledged()
+        );
         assertTrue(
             client().admin()
                 .indices()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java
index 0b9ca9d9f958..a51f4bb10dc0 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java
@@ -325,7 +325,7 @@ public void testDanglingIndicesImportedAndDeletedCannotBeReimported() throws Exc
             }
         }

-        final Metadata metadata = clusterAdmin().prepareState().clear().setMetadata(true).get().getState().metadata();
+        final Metadata metadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get().getState().metadata();
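Unlike the single-timeout state and health builders, prepareUpdateSettings is passed two arguments here, (TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT). By analogy with acknowledged master-node requests elsewhere in the codebase, these are presumably the master-node timeout and the ack timeout, in that order; a hedged sketch of the pattern:

    // Sketch only: the roles of the two arguments are inferred from the surrounding hunks, not stated in this patch.
    assertTrue(
        clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) // master-node timeout, ack timeout
            .setPersistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "all"))
            .get()
            .isAcknowledged()
    );
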
         assertTrue(metadata.indexGraveyard().toString(), metadata.indexGraveyard().containsIndex(new Index(INDEX_NAME, danglingIndexUUID)));
         assertNull(Strings.toString(metadata, true, true), metadata.index(INDEX_NAME));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
index a9e06fe438c4..64f594c488a5 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
@@ -53,7 +53,7 @@ public void run() {
         };
         indexingThread.start();

-        ClusterState initialState = clusterAdmin().prepareState().get().getState();
+        ClusterState initialState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         DiscoveryNode[] dataNodes = initialState.getNodes().getDataNodes().values().toArray(DiscoveryNode[]::new);
         DiscoveryNode relocationSource = initialState.getNodes()
             .getDataNodes()
@@ -65,7 +65,7 @@ public void run() {
             }
             logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName());
             ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()));
-            ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth()
+            ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
                 .setTimeout(TimeValue.timeValueSeconds(60))
                 .setWaitForEvents(Priority.LANGUID)
                 .setWaitForNoRelocatingShards(true)
@@ -77,7 +77,7 @@ public void run() {
                     "timed out waiting for relocation iteration [" + i + "]",
                     ReferenceDocs.LOGGING
                 );
-                final ClusterState clusterState = clusterAdmin().prepareState().get().getState();
+                final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
                 logger.info("timed out for waiting for relocation iteration [{}] \ncluster state {}", i, clusterState);
                 finished.set(true);
                 indexingThread.join();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index fbbeec4b4e9b..abeaf8422748 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -523,7 +523,7 @@ public Settings onNodeStopped(String nodeName) throws Exception {
         transportService.clearAllRules();

         // make sure nodeA has primary and nodeB has replica
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         List<ShardRouting> startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED);
         assertThat(startedShards.size(), equalTo(2));
         for (ShardRouting shardRouting : startedShards) {
@@ -635,7 +635,7 @@ public void testRerouteRecovery() throws Exception {

         logger.info("--> start node C");
         String nodeC = internalCluster().startNode();
-        assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("3").get().isTimedOut());
+        assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").get().isTimedOut());

         logger.info("--> slowing down recoveries");
         throttleRecovery10Seconds(shardSize);
@@ -1118,7 +1118,7 @@ public void testOngoingRecoveryAndMasterFailOver() throws Exception {
                 Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).putNull("index.routing.allocation.include._name"),
                 indexName
             );
-            assertFalse(clusterAdmin().prepareHealth(indexName).setWaitForActiveShards(2).get().isTimedOut());
+            assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).setWaitForActiveShards(2).get().isTimedOut());
         } finally {
             allowToCompletePhase1Latch.countDown();
         }
@@ -1261,7 +1261,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() throws Exception
             @Override
             public Settings onNodeStopped(String nodeName) throws Exception {
                 assertFalse(
-                    clusterAdmin().prepareHealth()
+                    clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
                         .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1))
                         .setWaitForEvents(Priority.LANGUID)
                         .get()
@@ -1325,7 +1325,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() t
             @Override
             public Settings onNodeStopped(String nodeName) throws Exception {
                 assertFalse(
-                    clusterAdmin().prepareHealth()
+                    clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
                         .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1))
                         .setWaitForEvents(Priority.LANGUID)
                         .get()
@@ -1432,7 +1432,7 @@ public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonabl
             @Override
             public Settings onNodeStopped(String nodeName) throws Exception {
                 assertFalse(
-                    clusterAdmin().prepareHealth()
+                    clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
                         .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1))
                         .setWaitForEvents(Priority.LANGUID)
                         .get()
@@ -1657,7 +1657,7 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception {
         String indexName = "test-index";
         createIndex(indexName, indexSettings(1, 1).put("index.routing.allocation.include._name", String.join(",", dataNodes)).build());
         ensureGreen(indexName);
-        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
+        ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         DiscoveryNode nodeWithOldPrimary = clusterState.nodes()
             .get(clusterState.routingTable().index(indexName).shard(0).primaryShard().currentNodeId());
         final var transportService = MockTransportService.getInstance(nodeWithOldPrimary.getName());
@@ -1731,7 +1731,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception {
         indexRandom(randomBoolean(), true, true, indexRequests);
         assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0));

-        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
+        ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         DiscoveryNode nodeWithPrimary = clusterState.nodes()
             .get(clusterState.routingTable().index(indexName).shard(0).primaryShard().currentNodeId());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java
index 8595f11bae42..25ac384a2291 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/ReplicaToPrimaryPromotionIT.java
@@ -55,7 +55,11 @@ public void testPromoteReplicaToPrimary() throws Exception {
         }

         // pick up a data node that contains a random primary shard
-        ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState();
+        ClusterState state = client(internalCluster().getMasterName()).admin()
+            .cluster()
+            .prepareState(TEST_REQUEST_TIMEOUT)
+            .get()
+            .getState();
         final int numShards = state.metadata().index(indexName).getNumberOfShards();
         final ShardRouting primaryShard = state.routingTable().index(indexName).shard(randomIntBetween(0, numShards - 1)).primaryShard();
         final DiscoveryNode randomNode = state.nodes().resolveNode(primaryShard.currentNodeId());
@@ -64,7 +68,7 @@ public void testPromoteReplicaToPrimary() throws Exception {
         internalCluster().stopNode(randomNode.getName());
         ensureYellowAndNoInitializingShards(indexName);

-        state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState();
+        state = client(internalCluster().getMasterName()).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         final IndexRoutingTable indexRoutingTable = state.routingTable().index(indexName);
         for (int i = 0; i < indexRoutingTable.size(); i++) {
             for (ShardRouting shardRouting : indexRoutingTable.shard(i).activeShards()) {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java
index 2f336f25c3ca..d117373b58f0 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java
@@ -284,7 +284,7 @@ private ShardSnapshotsService getShardSnapshotsService() {
     }

     private ShardId getShardIdForIndex(String indexName) {
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         return state.routingTable().index(indexName).shard(0).shardId();
     }

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java
index 67482ad73367..606b694cbfeb 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasIT.java
@@ -36,7 +36,7 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception {
         logger.info("Creating index test");
         assertAcked(prepareCreate("test", 2));
         logger.info("Running Cluster Health");
-        ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth()
+        ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT)
             .setWaitForEvents(Priority.LANGUID)
             .setWaitForGreenStatus()
             .get();
@@ -62,11 +62,16 @@ public void testSimpleUpdateNumberOfReplicas() throws Exception {
             assertHitCount(prepareSearch().setSize(0).setQuery(matchAllQuery()), 10L);
         }

-        final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion();
+        final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
+            .get()
+            .getState()
+            .metadata()
+            .index("test")
+            .getSettingsVersion();
         logger.info("Increasing the number of replicas from 1 to 2");
         setReplicaCount(2, "test");
         logger.info("Running Cluster Health");
-        clusterHealth = clusterAdmin().prepareHealth()
clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() .setWaitForActiveShards(numShards.numPrimaries * 4) @@ -414,14 +434,19 @@ public void testAutoExpandNumberReplicas2() { * time from the number of replicas changed by the allocation service. */ assertThat( - clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").getSettingsVersion(), equalTo(1 + 1 + settingsVersion) ); } public void testUpdateWithInvalidNumberOfReplicas() { createIndex("test"); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final int value = randomIntBetween(-10, -1); try { indicesAdmin().prepareUpdateSettings("test") @@ -431,7 +456,7 @@ public void testUpdateWithInvalidNumberOfReplicas() { } catch (IllegalArgumentException e) { assertEquals("Failed to parse value [" + value + "] for setting [index.number_of_replicas] must be >= 0", e.getMessage()); assertThat( - clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(), + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").getSettingsVersion(), equalTo(settingsVersion) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 6e58d275e578..20089cd463bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -64,7 +64,7 @@ public void testInvalidDynamicUpdate() { indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")) ); assertEquals(exception.getCause().getMessage(), "this setting goes boom"); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertNotEquals(indexMetadata.getSettings().get("index.dummy"), "invalid dynamic value"); } @@ -141,44 +141,48 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testUpdateDependentClusterSettings() { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] 
for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) .setPersistentSettings(Settings.builder().put("cluster.acc.test.user", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); if (randomBoolean()) { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf").put("cluster.acc.test.user", "asdf")) .get(); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setTransientSettings(Settings.builder().putNull("cluster.acc.test.pw").putNull("cluster.acc.test.user")) .get(); } else { - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf").put("cluster.acc.test.user", "asdf")) .get(); iae = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); @@ -230,7 +234,7 @@ public void testResetDefaultWithWildcard() { createIndex("test"); indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", -1)).get(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -239,7 +243,7 @@ public void testResetDefaultWithWildcard() { } } indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.ref*")).get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -259,7 +263,7 @@ public void testResetDefault() { .put("index.translog.generation_threshold_size", 
"4096b") ) .get(); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertEquals(indexMetadata.getSettings().get("index.refresh_interval"), "-1"); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -270,7 +274,7 @@ public void testResetDefault() { } } indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.refresh_interval")).get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertNull(indexMetadata.getSettings().get("index.refresh_interval")); for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { IndexService indexService = service.indexService(resolveIndex("test")); @@ -303,7 +307,7 @@ public void testOpenCloseUpdateSettings() throws Exception { .put("index.final", "no") ) // this one can't ); - IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), nullValue()); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), nullValue()); assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -318,7 +322,7 @@ public void testOpenCloseUpdateSettings() throws Exception { .setSettings(Settings.builder().put("index.refresh_interval", -1)) // this one can change .get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("-1")); // Now verify via dedicated get settings api: getSettingsResponse = indicesAdmin().prepareGetSettings("test").get(); @@ -327,7 +331,7 @@ public void testOpenCloseUpdateSettings() throws Exception { // now close the index, change the non dynamic setting, and see that it applies // Wait for the index to turn green before attempting to close it - ClusterHealthResponse health = clusterAdmin().prepareHealth() + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setTimeout(TimeValue.timeValueSeconds(30)) .setWaitForEvents(Priority.LANGUID) .setWaitForGreenStatus() @@ -338,7 +342,7 @@ public void testOpenCloseUpdateSettings() throws Exception { indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)).get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getNumberOfReplicas(), equalTo(1)); indicesAdmin().prepareUpdateSettings("test") @@ -349,7 +353,7 @@ public void testOpenCloseUpdateSettings() throws Exception { ) // this one can't .get(); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = 
clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); assertThat(indexMetadata.getSettings().get("index.fielddata.cache"), equalTo("none")); @@ -363,7 +367,7 @@ public void testOpenCloseUpdateSettings() throws Exception { ) // this one really can't ); assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable")); - indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); + indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), equalTo("1s")); assertThat(indexMetadata.getSettings().get("index.final"), nullValue()); @@ -426,22 +430,42 @@ public void testSettingsVersion() { ensureGreen("test"); { - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "500ms")) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(1 + settingsVersion)); } { final boolean block = randomBoolean(); assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block))); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block == false)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(1 + settingsVersion)); // if the read-only block is present, remove it @@ -458,12 +482,22 @@ public void testSettingsVersionUnchanged() { ensureGreen("test"); { - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final String refreshInterval = indicesAdmin().prepareGetSettings("test").get().getSetting("test", "index.refresh_interval"); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", refreshInterval)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + 
.getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(settingsVersion)); } @@ -471,9 +505,19 @@ public void testSettingsVersionUnchanged() { final boolean block = randomBoolean(); assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block))); // now put the same block again - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.read_only", block))); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(settingsVersion)); // if the read-only block is present, remove it @@ -493,14 +537,24 @@ public void testSettingsVersionUnchanged() { public void testNumberOfReplicasSettingsVersionUnchanged() { createIndex("test"); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final int numberOfReplicas = Integer.valueOf( indicesAdmin().prepareGetSettings("test").get().getSetting("test", "index.number_of_replicas") ); assertAcked( indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", numberOfReplicas)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(settingsVersion)); } @@ -512,7 +566,12 @@ public void testNumberOfReplicasSettingsVersionUnchanged() { public void testNumberOfReplicasSettingsVersion() { createIndex("test"); - final long settingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long settingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); final int numberOfReplicas = Integer.valueOf( indicesAdmin().prepareGetSettings("test").get().getSetting("test", "index.number_of_replicas") ); @@ -520,7 +579,12 @@ public void testNumberOfReplicasSettingsVersion() { indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put("index.number_of_replicas", 1 + numberOfReplicas)) ); - final long newSettingsVersion = clusterAdmin().prepareState().get().getState().metadata().index("test").getSettingsVersion(); + final long newSettingsVersion = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .metadata() + .index("test") + .getSettingsVersion(); assertThat(newSettingsVersion, equalTo(1 + settingsVersion)); } @@ -574,7 +638,7 @@ public void testNoopUpdate() { 
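The hunks above all apply one mechanical change: cluster-level admin request builders in these tests now take an explicit master-node timeout instead of relying on a default. A minimal sketch of the before/after shape, assuming the ESIntegTestCase context these files run in; TEST_REQUEST_TIMEOUT is the test-suite constant used throughout the diff, and the index name and assertions are illustrative:

    // Inside an ESIntegTestCase subclass, as assumed above.
    public void testExplicitMasterNodeTimeout() {
        createIndex("test");

        // Before this PR the builder fell back to a default master-node timeout:
        //   clusterAdmin().prepareState().get().getState().metadata().index("test");

        // After: the timeout is threaded through explicitly, as in every hunk above.
        IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
            .get()
            .getState()
            .metadata()
            .index("test");
        assertNotNull(indexMetadata);

        // Health checks follow the same pattern and may be scoped to indices:
        ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test")
            .setWaitForGreenStatus()
            .get();
        assertFalse(health.isTimedOut());
    }
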
indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) ); assertNotSame(currentState, clusterService.state()); - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNoInitializingShards(true) .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 6b1aafe2f9b1..5501b88d8a26 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -47,7 +47,7 @@ public void testCloseAllRequiresName() { } private void assertIndexIsClosed(String... indices) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); for (String index : indices) { IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().indices().get(index); assertNotNull(indexMetadata); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index d52294d7584b..3947ae6d2b57 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -162,7 +162,7 @@ public void testCloseUnassignedIndex() throws Exception { .setSettings(Settings.builder().put("index.routing.allocation.include._name", "nothing").build()) ); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(clusterState.metadata().indices().get(indexName).getState(), is(IndexMetadata.State.OPEN)); assertThat(clusterState.routingTable().allShards().allMatch(ShardRouting::unassigned), is(true)); @@ -182,7 +182,7 @@ public void testConcurrentClose() throws InterruptedException, ExecutionExceptio IntStream.range(0, nbDocs).mapToObj(i -> prepareIndex(indexName).setId(String.valueOf(i)).setSource("num", i)).collect(toList()) ); - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(indexName) + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName) .setWaitForYellowStatus() .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) @@ -243,7 +243,7 @@ public void testCloseWhileDeletingIndices() throws Exception { } indices[i] = indexName; } - assertThat(clusterAdmin().prepareState().get().getState().metadata().indices().size(), equalTo(indices.length)); + assertThat(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indices().size(), equalTo(indices.length)); startInParallel(indices.length * 2, i -> { final String index = indices[i % indices.length]; @@ -285,7 +285,7 @@ public void testConcurrentClosesAndOpens() throws Exception { indexer.stopAndAwaitStopped(); - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); if 
(clusterState.metadata().indices().get(indexName).getState() == IndexMetadata.State.CLOSE) { assertIndexIsClosed(indexName); assertAcked(indicesAdmin().prepareOpen(indexName)); @@ -310,7 +310,7 @@ public void testCloseIndexWaitForActiveShards() throws Exception { ensureGreen(indexName); final CloseIndexResponse closeIndexResponse = indicesAdmin().prepareClose(indexName).get(); - assertThat(clusterAdmin().prepareHealth(indexName).get().getStatus(), is(ClusterHealthStatus.GREEN)); + assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getStatus(), is(ClusterHealthStatus.GREEN)); assertTrue(closeIndexResponse.isAcknowledged()); assertTrue(closeIndexResponse.isShardsAcknowledged()); assertThat(closeIndexResponse.getIndices().get(0), notNullValue()); @@ -532,7 +532,7 @@ private static void closeIndices(final CloseIndexRequestBuilder requestBuilder) } static void assertIndexIsClosed(final String... indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { final IndexMetadata indexMetadata = clusterState.metadata().indices().get(index); assertThat(indexMetadata.getState(), is(IndexMetadata.State.CLOSE)); @@ -555,7 +555,7 @@ static void assertIndexIsClosed(final String... indices) { } static void assertIndexIsOpened(final String... indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { final IndexMetadata indexMetadata = clusterState.metadata().indices().get(index); assertThat(indexMetadata.getState(), is(IndexMetadata.State.OPEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index a7a2af57ef81..9ec7ebcf9153 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -49,7 +49,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase { public void testSimpleCloseOpen() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test1").get(); @@ -70,7 +70,7 @@ public void testSimpleOpenMissingIndex() { public void testOpenOneMissingIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); Exception e = expectThrows(IndexNotFoundException.class, client.admin().indices().prepareOpen("test1", "test2")); assertThat(e.getMessage(), is("no such index [test2]")); @@ -79,7 +79,7 @@ public void testOpenOneMissingIndex() { public void testOpenOneMissingIndexIgnoreMissing() { Client client 
= client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); OpenIndexResponse openIndexResponse = client.admin() .indices() @@ -94,7 +94,7 @@ public void testOpenOneMissingIndexIgnoreMissing() { public void testCloseOpenMultipleIndices() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").get(); @@ -116,7 +116,7 @@ public void testCloseOpenMultipleIndices() { public void testCloseOpenWildcard() { Client client = client(); createIndex("test1", "test2", "a"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("test*").get(); @@ -133,7 +133,7 @@ public void testCloseOpenWildcard() { public void testCloseOpenAll() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("_all").get(); @@ -149,7 +149,7 @@ public void testCloseOpenAll() { public void testCloseOpenAllWildcard() { Client client = client(); createIndex("test1", "test2", "test3"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse closeIndexResponse = client.admin().indices().prepareClose("*").get(); @@ -175,7 +175,7 @@ public void testOpenNullIndex() { public void testOpenAlreadyOpenedIndex() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); // no problem if we try to open an index that's already in open state @@ -188,7 +188,7 @@ public void testOpenAlreadyOpenedIndex() { public void testSimpleCloseOpenAlias() { Client client = client(); createIndex("test1"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = 
client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse aliasesResponse = client.admin().indices().prepareAliases().addAlias("test1", "test1-alias").get(); @@ -207,7 +207,7 @@ public void testSimpleCloseOpenAlias() { public void testCloseOpenAliasMultipleIndices() { Client client = client(); createIndex("test1", "test2"); - ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); AcknowledgedResponse aliasesResponse1 = client.admin().indices().prepareAliases().addAlias("test1", "test-alias").get(); @@ -240,7 +240,7 @@ public void testOpenWaitingForActiveShardsFailed() throws Exception { assertThat(response.isShardsAcknowledged(), equalTo(false)); assertBusy( () -> assertThat( - client.admin().cluster().prepareState().get().getState().metadata().index("test").getState(), + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN) ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java index 3c16e0f2624e..1b0b71b86f07 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java @@ -159,7 +159,7 @@ private Releasable interceptVerifyShardBeforeCloseActions(final String indexPatt } private static void assertIndexIsBlocked(final String... 
indices) { - final ClusterState clusterState = clusterAdmin().prepareState().get().getState(); + final ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (String index : indices) { assertThat(clusterState.metadata().indices().get(index).getState(), is(IndexMetadata.State.OPEN)); assertThat(clusterState.routingTable().index(index), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index b5448498f0ce..c62f1776178b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -34,7 +34,7 @@ public void testSimpleOpenClose() { NumShards numShards = getNumShards("test"); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(numShards.numPrimaries)); assertEquals( @@ -48,7 +48,7 @@ public void testSimpleOpenClose() { logger.info("--> closing test index..."); assertAcked(indicesAdmin().prepareClose("test")); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE)); assertThat(stateResponse.getState().routingTable().index("test"), notNullValue()); @@ -66,7 +66,7 @@ public void testSimpleOpenClose() { logger.info("--> waiting for green status"); ensureGreen(); - stateResponse = clusterAdmin().prepareState().get(); + stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(numShards.numPrimaries)); @@ -86,7 +86,7 @@ public void testFastCloseAfterCreateContinuesCreateAfterOpen() { .setSettings(Settings.builder().put("index.routing.allocation.include.tag", "no_such_node").build()) .get(); - ClusterHealthResponse health = clusterAdmin().prepareHealth("test").setWaitForNodes(">=2").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").setWaitForNodes(">=2").get(); assertThat(health.isTimedOut(), equalTo(false)); assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED)); @@ -102,7 +102,7 @@ public void testFastCloseAfterCreateContinuesCreateAfterOpen() { NumShards numShards = getNumShards("test"); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN)); assertThat(stateResponse.getState().routingTable().index("test").size(), equalTo(numShards.numPrimaries)); assertEquals( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index 7ffc2539d2fa..083823523afa 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -237,7 +237,7 @@ public void testClearAllCaches() throws Exception { .setMapping("field", "type=text,fielddata=true") ); ensureGreen(); - clusterAdmin().prepareHealth().setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForGreenStatus().get(); prepareIndex("test").setId("1").setSource("field", "value1").get(); prepareIndex("test").setId("2").setSource("field", "value2").get(); indicesAdmin().prepareRefresh().get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 5eeb07968ce4..58c1a30cb551 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -98,7 +98,7 @@ public void testIndexCleanup() throws Exception { ) ); ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Index index = state.metadata().index("test").getIndex(); logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2"); @@ -110,7 +110,10 @@ public void testIndexCleanup() throws Exception { logger.info("--> starting node server3"); final String node_3 = internalCluster().startNode(nonMasterNode()); logger.info("--> running cluster_health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNodes("4").setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForNodes("4") + .setWaitForNoRelocatingShards(true) + .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); @@ -131,7 +134,7 @@ public void testIndexCleanup() throws Exception { } else { ClusterRerouteUtils.reroute(internalCluster().client(), new MoveAllocationCommand("test", 0, node_1, node_3)); } - clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get(); + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNoRelocatingShards(true).get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); assertShardDeleted(node_1, index, 0); @@ -197,13 +200,13 @@ public void testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted( ) ); ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Index index = state.metadata().index("test").getIndex(); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true)); final String node_2 = internalCluster().startDataOnlyNode(Settings.builder().build()); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("2").get().isTimedOut()); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); assertThat(Files.exists(indexDirectory(node_1, 
index)), equalTo(true)); @@ -226,7 +229,7 @@ public void testShardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted( logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2); ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2)); shardActiveRequestSent.await(); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNoRelocatingShards(true).get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); logClusterState(); // delete the index. node_1 that still waits for the next cluster state update will then get the delete index next. @@ -258,7 +261,7 @@ public void testShardsCleanup() throws Exception { ); ensureGreen("test"); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); Index index = state.metadata().index("test").getIndex(); logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2"); assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true)); @@ -267,7 +270,10 @@ public void testShardsCleanup() throws Exception { logger.info("--> starting node server3"); String node_3 = internalCluster().startNode(); logger.info("--> running cluster_health"); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForNodes("3") + .setWaitForNoRelocatingShards(true) + .get(); assertThat(clusterHealth.isTimedOut(), equalTo(false)); logger.info("--> making sure that shard is not allocated on server3"); @@ -278,7 +284,7 @@ public void testShardsCleanup() throws Exception { internalCluster().stopNode(node_2); logger.info("--> running cluster_health"); - clusterHealth = clusterAdmin().prepareHealth() + clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForGreenStatus() .setWaitForNodes("2") .setWaitForNoRelocatingShards(true) @@ -325,7 +331,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { ) ); assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNoRelocatingShards(true) .setWaitForGreenStatus() .setWaitForNodes("5") @@ -344,7 +350,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { internalCluster().stopNode(nodesToShutDown.get(1)); logger.debug("--> verifying index is red"); - ClusterHealthResponse health = clusterAdmin().prepareHealth().setWaitForNodes("3").get(); + ClusterHealthResponse health = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").get(); if (health.getStatus() != ClusterHealthStatus.RED) { logClusterState(); fail("cluster didn't become red, despite of shutting 2 of 3 nodes"); @@ -362,7 +368,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { assertBusy(() -> assertTrue(internalCluster().getInstance(IndicesService.class, node4).hasIndex(index))); // wait for 4 active shards - we should have lost one shard - assertFalse(clusterAdmin().prepareHealth().setWaitForActiveShards(4).get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForActiveShards(4).get().isTimedOut()); // disable 
allocation again to control concurrency a bit and allow shard active to kick in before allocation updateClusterSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); @@ -371,7 +377,7 @@ public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception { internalCluster().startNodes(node1DataPathSettings, node2DataPathSettings); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("5").get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("5").get().isTimedOut()); updateClusterSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")); @@ -395,7 +401,7 @@ public void testShardActiveElseWhere() throws Exception { ensureGreen("test"); waitNoPendingTasksOnAll(); - ClusterStateResponse stateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse stateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); final Index index = stateResponse.getState().metadata().index("test").getIndex(); RoutingNode routingNode = stateResponse.getState().getRoutingNodes().node(nonMasterId); final int[] node2Shards = new int[routingNode.numberOfOwningShards()]; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 1e1333f376e9..4051dba84588 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -178,7 +178,7 @@ public void testSimpleIndexTemplateTests() throws Exception { } public void testDeleteIndexTemplate() throws Exception { - final int existingTemplates = admin().cluster().prepareState().get().getState().metadata().templates().size(); + final int existingTemplates = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().templates().size(); logger.info("--> put template_1 and template_2"); indicesAdmin().preparePutTemplate("template_1") .setPatterns(Collections.singletonList("te*")) @@ -223,7 +223,7 @@ public void testDeleteIndexTemplate() throws Exception { logger.info("--> explicitly delete template_1"); indicesAdmin().prepareDeleteTemplate("template_1").get(); - ClusterState state = admin().cluster().prepareState().get().getState(); + ClusterState state = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertThat(state.metadata().templates().size(), equalTo(1 + existingTemplates)); assertThat(state.metadata().templates().containsKey("template_2"), equalTo(true)); @@ -254,11 +254,14 @@ public void testDeleteIndexTemplate() throws Exception { logger.info("--> delete template*"); indicesAdmin().prepareDeleteTemplate("template*").get(); - assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(existingTemplates)); + assertThat( + admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().templates().size(), + equalTo(existingTemplates) + ); logger.info("--> delete * with no templates, make sure we don't get a failure"); indicesAdmin().prepareDeleteTemplate("*").get(); - assertThat(admin().cluster().prepareState().get().getState().metadata().templates().size(), equalTo(0)); + 
assertThat(admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().templates().size(), equalTo(0)); } public void testThatGetIndexTemplatesWorks() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java index 2e515b07b59a..828c02a2ba89 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java @@ -14,10 +14,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.update.UpdateResponse; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -56,8 +53,7 @@ protected Collection> getPlugins() { public void testAsyncProcessorImplementation() { // A pipeline with 2 processors: the test async processor and sync test processor. - BytesReference pipelineBody = new BytesArray("{\"processors\": [{\"test-async\": {}, \"test\": {}}]}"); - clusterAdmin().putPipeline(new PutPipelineRequest("_id", pipelineBody, XContentType.JSON)).actionGet(); + putJsonPipeline("_id", "{\"processors\": [{\"test-async\": {}, \"test\": {}}]}"); BulkRequest bulkRequest = new BulkRequest(); int numDocs = randomIntBetween(8, 256); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java index 9fd7aaabaf2f..4b26240d8165 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java @@ -16,13 +16,12 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulateDocumentBaseResult; import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineResponse; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.common.bytes.BytesReference; @@ -30,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import java.util.Collection; @@ -38,6 +36,7 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.ingest.IngestPipelineTestUtils.putJsonPipelineRequest; import static org.elasticsearch.test.NodeRoles.nonIngestNode; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static 
org.hamcrest.Matchers.equalTo; @@ -63,19 +62,17 @@ protected Collection> nodePlugins() { } public void testSimulate() throws Exception { - BytesReference pipelineSource = BytesReference.bytes( - jsonBuilder().startObject() - .field("description", "my_pipeline") + putJsonPipeline( + "_id", + (builder, params) -> builder.field("description", "my_pipeline") .startArray("processors") .startObject() .startObject("test") .endObject() .endObject() .endArray() - .endObject() ); - clusterAdmin().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); - GetPipelineResponse getResponse = clusterAdmin().prepareGetPipeline("_id").get(); + GetPipelineResponse getResponse = getPipelines("_id"); assertThat(getResponse.isFound(), is(true)); assertThat(getResponse.pipelines().size(), equalTo(1)); assertThat(getResponse.pipelines().get(0).getId(), equalTo("_id")); @@ -118,26 +115,22 @@ public void testSimulate() throws Exception { assertThat(simulateDocumentBaseResult.getFailure(), nullValue()); // cleanup - AcknowledgedResponse deletePipelineResponse = clusterAdmin().prepareDeletePipeline("_id").get(); - assertTrue(deletePipelineResponse.isAcknowledged()); + deletePipeline("_id"); } public void testBulkWithIngestFailures() throws Exception { createIndex("index"); - BytesReference source = BytesReference.bytes( - jsonBuilder().startObject() - .field("description", "my_pipeline") + putJsonPipeline( + "_id", + (builder, params) -> builder.field("description", "my_pipeline") .startArray("processors") .startObject() .startObject("test") .endObject() .endObject() .endArray() - .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); int numRequests = scaledRandomIntBetween(32, 128); BulkRequest bulkRequest = new BulkRequest(); @@ -169,26 +162,22 @@ public void testBulkWithIngestFailures() throws Exception { } // cleanup - AcknowledgedResponse deletePipelineResponse = clusterAdmin().prepareDeletePipeline("_id").get(); - assertTrue(deletePipelineResponse.isAcknowledged()); + deletePipeline("_id"); } public void testBulkWithUpsert() throws Exception { createIndex("index"); - BytesReference source = BytesReference.bytes( - jsonBuilder().startObject() - .field("description", "my_pipeline") + putJsonPipeline( + "_id", + (builder, params) -> builder.field("description", "my_pipeline") .startArray("processors") .startObject() .startObject("test") .endObject() .endObject() .endArray() - .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); BulkRequest bulkRequest = new BulkRequest(); IndexRequest indexRequest = new IndexRequest("index").id("1").setPipeline("_id"); @@ -211,21 +200,18 @@ public void testBulkWithUpsert() throws Exception { } public void test() throws Exception { - BytesReference source = BytesReference.bytes( - jsonBuilder().startObject() - .field("description", "my_pipeline") + putJsonPipeline( + "_id", + (builder, params) -> builder.field("description", "my_pipeline") .startArray("processors") .startObject() .startObject("test") .endObject() .endObject() .endArray() - .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - GetPipelineResponse getResponse = clusterAdmin().prepareGetPipeline("_id").get(); + GetPipelineResponse getResponse = 
getPipelines("_id"); assertThat(getResponse.isFound(), is(true)); assertThat(getResponse.pipelines().size(), equalTo(1)); assertThat(getResponse.pipelines().get(0).getId(), equalTo("_id")); @@ -241,11 +227,9 @@ public void test() throws Exception { assertThat(doc.get("field"), equalTo("value2")); assertThat(doc.get("processed"), equalTo(true)); - DeletePipelineRequest deletePipelineRequest = new DeletePipelineRequest("_id"); - AcknowledgedResponse response = clusterAdmin().deletePipeline(deletePipelineRequest).get(); - assertThat(response.isAcknowledged(), is(true)); + deletePipeline("_id"); - getResponse = clusterAdmin().prepareGetPipeline("_id").get(); + getResponse = getPipelines("_id"); assertThat(getResponse.isFound(), is(false)); assertThat(getResponse.pipelines().size(), equalTo(0)); } @@ -263,29 +247,29 @@ public void testPutWithPipelineFactoryError() throws Exception { .endArray() .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id2", source, XContentType.JSON); - Exception e = expectThrows(ElasticsearchParseException.class, clusterAdmin().putPipeline(putPipelineRequest)); + PutPipelineRequest putPipelineRequest = putJsonPipelineRequest("_id2", source); + Exception e = expectThrows( + ElasticsearchParseException.class, + client().execute(PutPipelineTransportAction.TYPE, putPipelineRequest) + ); assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); - GetPipelineResponse response = clusterAdmin().prepareGetPipeline("_id2").get(); + GetPipelineResponse response = getPipelines("_id2"); assertFalse(response.isFound()); } public void testWithDedicatedMaster() throws Exception { String masterOnlyNode = internalCluster().startMasterOnlyNode(); - BytesReference source = BytesReference.bytes( - jsonBuilder().startObject() - .field("description", "my_pipeline") + putJsonPipeline( + "_id", + (builder, params) -> builder.field("description", "my_pipeline") .startArray("processors") .startObject() .startObject("test") .endObject() .endObject() .endArray() - .endObject() ); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); BulkItemResponse item = client(masterOnlyNode).prepareBulk() .add(prepareIndex("test").setSource("field", "value2", "drop", true).setPipeline("_id")) @@ -296,56 +280,38 @@ public void testWithDedicatedMaster() throws Exception { } public void testPipelineOriginHeader() throws Exception { - { - XContentBuilder source = jsonBuilder().startObject(); + putJsonPipeline("1", (source, params) -> { + source.startArray("processors"); + source.startObject(); { - source.startArray("processors"); - source.startObject(); - { - source.startObject("pipeline"); - source.field("name", "2"); - source.endObject(); - } + source.startObject("pipeline"); + source.field("name", "2"); source.endObject(); - source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("1", BytesReference.bytes(source), XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - } - { - XContentBuilder source = jsonBuilder().startObject(); + return source.endArray(); + }); + putJsonPipeline("2", (source, params) -> { + source.startArray("processors"); + source.startObject(); { - source.startArray("processors"); - source.startObject(); - { - source.startObject("pipeline"); - source.field("name", "3"); - source.endObject(); - } + 
source.startObject("pipeline"); + source.field("name", "3"); source.endObject(); - source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("2", BytesReference.bytes(source), XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - } - { - XContentBuilder source = jsonBuilder().startObject(); + return source.endArray(); + }); + putJsonPipeline("3", (source, params) -> { + source.startArray("processors"); + source.startObject(); { - source.startArray("processors"); - source.startObject(); - { - source.startObject("fail"); - source.endObject(); - } + source.startObject("fail"); source.endObject(); - source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("3", BytesReference.bytes(source), XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - } + return source.endArray(); + }); Exception e = expectThrows(Exception.class, () -> { IndexRequest indexRequest = new IndexRequest("test"); @@ -359,8 +325,7 @@ public void testPipelineOriginHeader() throws Exception { } public void testPipelineProcessorOnFailure() throws Exception { - { - XContentBuilder source = jsonBuilder().startObject(); + putJsonPipeline("1", (source, params) -> { { source.startArray("processors"); source.startObject(); @@ -382,43 +347,29 @@ public void testPipelineProcessorOnFailure() throws Exception { source.endObject(); source.endArray(); } - source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("1", BytesReference.bytes(source), XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - } - { - XContentBuilder source = jsonBuilder().startObject(); + return source; + }); + putJsonPipeline("2", (source, params) -> { + source.startArray("processors"); + source.startObject(); { - source.startArray("processors"); - source.startObject(); - { - source.startObject("pipeline"); - source.field("name", "3"); - source.endObject(); - } + source.startObject("pipeline"); + source.field("name", "3"); source.endObject(); - source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("2", BytesReference.bytes(source), XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - } - { - XContentBuilder source = jsonBuilder().startObject(); + return source.endArray(); + }); + putJsonPipeline("3", (source, params) -> { + source.startArray("processors"); + source.startObject(); { - source.startArray("processors"); - source.startObject(); - { - source.startObject("fail"); - source.endObject(); - } + source.startObject("fail"); source.endObject(); - source.endArray(); } source.endObject(); - PutPipelineRequest putPipelineRequest = new PutPipelineRequest("3", BytesReference.bytes(source), XContentType.JSON); - clusterAdmin().putPipeline(putPipelineRequest).get(); - } + return source.endArray(); + }); prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).setPipeline("1").get(); Map inserted = client().prepareGet("test", "1").get().getSourceAsMap(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java index a3c43de39218..5ec3e18d124e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java @@ -19,7 +19,6 @@ import 
org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.plugins.Plugin; @@ -41,6 +40,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.common.bytes.BytesReference.bytes; +import static org.elasticsearch.ingest.IngestPipelineTestUtils.putJsonPipelineRequest; import static org.elasticsearch.xcontent.XContentType.JSON; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -158,7 +159,7 @@ private void assertPipelinesSaveOK(CountDownLatch savedClusterState, AtomicLong assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).get(); ReservedStateMetadata reservedState = clusterStateResponse.getState() @@ -252,7 +253,7 @@ private PutPipelineRequest sampleRestRequest(String id) throws Exception { var builder = XContentFactory.contentBuilder(JSON) ) { builder.map(parser.map()); - return new PutPipelineRequest(id, BytesReference.bytes(builder), JSON); + return putJsonPipelineRequest(id, bytes(builder)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 5f036681f849..08c3d690ef47 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -9,12 +9,13 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.node.NodeService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.Arrays; @@ -24,7 +25,6 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) @@ -51,7 +51,7 @@ protected Collection> nodePlugins() { return installPlugin ? 
Arrays.asList(IngestTestPlugin.class) : Collections.emptyList(); } - public void testFailPipelineCreation() throws Exception { + public void testFailPipelineCreation() { installPlugin = true; String node1 = internalCluster().startNode(); installPlugin = false; @@ -59,12 +59,22 @@ public void testFailPipelineCreation() throws Exception { ensureStableCluster(2, node1); ensureStableCluster(2, node2); - try { - clusterAdmin().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); - fail("exception expected"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), containsString("Processor type [test] is not installed on node")); - } + assertThat( + asInstanceOf( + ElasticsearchParseException.class, + ExceptionsHelper.unwrapCause( + safeAwaitFailure( + AcknowledgedResponse.class, + l -> client().execute( + PutPipelineTransportAction.TYPE, + IngestPipelineTestUtils.putJsonPipelineRequest("id", pipelineSource), + l + ) + ) + ) + ).getMessage(), + containsString("Processor type [test] is not installed on node") + ); } public void testFailPipelineCreationProcessorNotInstalledOnMasterNode() throws Exception { @@ -72,12 +82,22 @@ public void testFailPipelineCreationProcessorNotInstalledOnMasterNode() throws E installPlugin = true; internalCluster().startNode(); - try { - clusterAdmin().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); - fail("exception expected"); - } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), equalTo("No processor type exists with name [test]")); - } + assertThat( + asInstanceOf( + ElasticsearchParseException.class, + ExceptionsHelper.unwrapCause( + safeAwaitFailure( + AcknowledgedResponse.class, + l -> client().execute( + PutPipelineTransportAction.TYPE, + IngestPipelineTestUtils.putJsonPipelineRequest("id", pipelineSource), + l + ) + ) + ) + ).getMessage(), + equalTo("No processor type exists with name [test]") + ); } // If there is pipeline defined and a node joins that doesn't have the processor installed then @@ -86,8 +106,7 @@ public void testFailStartNode() throws Exception { installPlugin = true; String node1 = internalCluster().startNode(); - AcknowledgedResponse response = clusterAdmin().preparePutPipeline("_id", pipelineSource, XContentType.JSON).get(); - assertThat(response.isAcknowledged(), is(true)); + putJsonPipeline("_id", pipelineSource); Pipeline pipeline = internalCluster().getInstance(NodeService.class, node1).getIngestService().getPipeline("_id"); assertThat(pipeline, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java index 86e1d2e332f3..63a16eae6e1e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java @@ -14,10 +14,7 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptPlugin; @@ -92,8 +89,7 @@ public void 
testIngestStatsNamesAndTypes() throws IOException { ] } """, MockScriptEngine.NAME, MockScriptEngine.NAME); - BytesReference pipeline1Reference = new BytesArray(pipeline1); - clusterAdmin().putPipeline(new PutPipelineRequest("pipeline1", pipeline1Reference, XContentType.JSON)).actionGet(); + putJsonPipeline("pipeline1", pipeline1); // index a single document through the pipeline BulkRequest bulkRequest = new BulkRequest(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java index 9b60044c94f7..eec90241fd90 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/nodescapabilities/SimpleNodesCapabilitiesIT.java @@ -24,7 +24,10 @@ public class SimpleNodesCapabilitiesIT extends ESIntegTestCase { public void testNodesCapabilities() throws IOException { internalCluster().startNodes(2); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); // check we support the capabilities API itself. Which we do. diff --git a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index a5700c319aa5..e4c83c81a768 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -33,7 +33,10 @@ public void testNodesInfos() { final String node_1 = nodesNames.get(0); final String node_2 = nodesNames.get(1); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = getNodeId(node_1); @@ -72,7 +75,10 @@ public void testNodesInfosTotalIndexingBuffer() { final String node_1 = nodesNames.get(0); final String node_2 = nodesNames.get(1); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = getNodeId(node_1); @@ -110,7 +116,10 @@ public void testAllocatedProcessors() throws Exception { final String node_1 = nodeNames.get(0); final String node_2 = nodeNames.get(1); - ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get(); + ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) + .setWaitForGreenStatus() + .setWaitForNodes("2") + .get(); logger.info("--> done cluster_health, status {}", clusterHealth.getStatus()); String server1NodeId = getNodeId(node_1); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java index a49fadb0c4b5..a851ecb11c79 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java @@ -84,7 +84,7 @@ public void testCloseIndexDefaultBehaviour() throws Exception { assertAcked(indicesAdmin().prepareClose("*").get()); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (Map.Entry indexMetadataEntry : state.getMetadata().indices().entrySet()) { assertEquals(IndexMetadata.State.CLOSE, indexMetadataEntry.getValue().getState()); } @@ -117,7 +117,7 @@ public void testOpenIndexDefaultBehaviour() throws Exception { assertAcked(indicesAdmin().prepareOpen("*").get()); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); for (Map.Entry indexMetadataEntry : state.getMetadata().indices().entrySet()) { assertEquals(IndexMetadata.State.OPEN, indexMetadataEntry.getValue().getState()); } @@ -150,7 +150,7 @@ public void testAddIndexBlockDefaultBehaviour() throws Exception { assertAcked(indicesAdmin().prepareAddBlock(WRITE, "*").get()); } - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); assertTrue("write block is set on index1", state.getBlocks().hasIndexBlock("index1", IndexMetadata.INDEX_WRITE_BLOCK)); assertTrue("write block is set on 1index", state.getBlocks().hasIndexBlock("1index", IndexMetadata.INDEX_WRITE_BLOCK)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index e7d23f97fc99..caaea0a8a384 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -110,7 +110,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { } private void assertEnableAssignmentSetting(final Allocation expected) { - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().clear().setMetadata(true).get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get(); Settings settings = clusterStateResponse.getState().getMetadata().settings(); String value = settings.get(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java index db26d630fefe..21aa8a877035 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java @@ -73,7 +73,7 @@ public void testListenersInvokedWhenIndexIsDeleted() throws Exception { final NumShards 
numShards = getNumShards(indexName); assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices(indexName) .setWaitForGreenStatus() .setWaitForEvents(Priority.LANGUID) @@ -140,7 +140,7 @@ public void testListenersInvokedWhenIndexIsRelocated() throws Exception { final NumShards numShards = getNumShards(indexName); assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices(indexName) .setWaitForGreenStatus() .setWaitForEvents(Priority.LANGUID) @@ -206,7 +206,7 @@ public void testListenersInvokedWhenIndexIsDangling() throws Exception { final NumShards numShards = getNumShards(indexName); assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setIndices(indexName) .setWaitForGreenStatus() .setWaitForEvents(Priority.LANGUID) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index 8335b3c0c424..6be1612c32ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -111,13 +111,16 @@ protected Collection> getMockPlugins() { } private void assertMasterNode(Client client, String node) { - assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } private void expectMasterNotFound() { expectThrows( MasterNotDiscoveredException.class, - clusterAdmin().prepareState().setMasterNodeTimeout(TimeValue.timeValueMillis(100)) + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).setMasterNodeTimeout(TimeValue.timeValueMillis(100)) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java index da59d306d411..adc9db1cb3ed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/FullRollingRestartIT.java @@ -62,7 +62,7 @@ public void testFullRollingRestart() throws Exception { // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -76,7 +76,7 @@ public void testFullRollingRestart() throws Exception { // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -94,7 +94,7 @@ public void testFullRollingRestart() throws Exception { internalCluster().stopRandomDataNode(); // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -105,7 +105,7 @@ public void testFullRollingRestart() throws Exception { internalCluster().stopRandomDataNode(); // 
make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -123,7 +123,7 @@ public void testFullRollingRestart() throws Exception { internalCluster().stopRandomDataNode(); // make sure the cluster state is green, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForGreenStatus() @@ -135,7 +135,7 @@ public void testFullRollingRestart() throws Exception { // make sure the cluster state is yellow, and all has been recovered assertTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(healthTimeout) .setWaitForYellowStatus() @@ -168,7 +168,7 @@ public void testNoRebalanceOnRollingRestart() throws Exception { prepareIndex("test").setId(Long.toString(i)).setSource(Map.of("test", "value" + i)).get(); } ensureGreen(); - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); RecoveryResponse recoveryResponse = indicesAdmin().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { assertNotEquals( @@ -186,7 +186,7 @@ public void testNoRebalanceOnRollingRestart() throws Exception { } internalCluster().restartRandomDataNode(); ensureGreen(); - clusterAdmin().prepareState().get(); + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); recoveryResponse = indicesAdmin().prepareRecoveries("test").get(); for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index 70aabbc8c30d..c066e3098df6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -102,7 +102,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { logger.info("--> waiting for GREEN health status ..."); // make sure the cluster state is green, and all has been recovered assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForGreenStatus() @@ -163,7 +163,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr logger.info("--> waiting for GREEN health status ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForGreenStatus() @@ -225,7 +225,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception logger.info("--> waiting for GREEN health status ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForGreenStatus() @@ -242,7 +242,7 @@ public void 
testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception allowNodes("test", 3); logger.info("--> waiting for relocations ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) @@ -252,7 +252,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception allowNodes("test", 2); logger.info("--> waiting for relocations ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) @@ -262,7 +262,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception allowNodes("test", 1); logger.info("--> waiting for relocations ..."); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) @@ -273,7 +273,7 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception logger.info("--> indexing threads stopped"); assertNoTimeout( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(5)) .setWaitForNoRelocatingShards(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java index 17daf403e056..52a95b206586 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RelocationIT.java @@ -140,7 +140,7 @@ public void testSimpleRelocationNoIndexing() { logger.info("--> start another node"); final String node_2 = internalCluster().startNode(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); @@ -149,7 +149,7 @@ public void testSimpleRelocationNoIndexing() { logger.info("--> relocate the shard from node1 to node2"); ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2)); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -184,7 +184,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.info("--> starting [node{}] ...", i); nodes[i - 1] = internalCluster().startNode(); if (i != numberOfNodes) { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i)) .setWaitForGreenStatus() @@ -215,7 +215,7 @@ public void testRelocationWhileIndexingRandom() throws Exception { logger.debug("--> flushing"); indicesAdmin().prepareFlush().get(); } - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = 
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -288,7 +288,7 @@ public void testRelocationWhileRefreshing() throws Exception { logger.info("--> starting [node_{}] ...", i); nodes[i] = internalCluster().startNode(); if (i != numberOfNodes - 1) { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(i + 1)) .setWaitForGreenStatus() @@ -349,7 +349,7 @@ public void indexShardStateChanged( // verify cluster was finished. assertFalse( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueSeconds(30)) @@ -390,7 +390,7 @@ public void testCancellationCleansTempFiles() throws Exception { requests.add(prepareIndex(indexName).setSource("{}", XContentType.JSON)); } indexRandom(true, requests); - assertFalse(clusterAdmin().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); + assertFalse(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut()); flush(); int allowedFailures = randomIntBetween(3, 5); // the default of the `index.allocation.max_retries` is 5. @@ -418,7 +418,7 @@ public void testCancellationCleansTempFiles() throws Exception { if (node.equals(p_node)) { continue; } - ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState(); + ClusterState state = client(node).admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).setLocal(true).get().getState(); assertThat( node + " indicates assigned replicas", state.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).size(), @@ -551,7 +551,7 @@ public void testRelocateWhileWaitingForRefresh() { logger.info("--> start another node"); final String node2 = internalCluster().startNode(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); @@ -560,7 +560,7 @@ public void testRelocateWhileWaitingForRefresh() { logger.info("--> relocate the shard from node1 to node2"); ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node1, node2)); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -602,7 +602,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E logger.info("--> start another node"); final String node2 = internalCluster().startNode(); - ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth() + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes("2") .get(); @@ -623,7 +623,7 @@ public void testRelocateWhileContinuouslyIndexingAndWaitingForRefresh() throws E ); } safeGet(relocationListener); - clusterHealthResponse = clusterAdmin().prepareHealth() + clusterHealthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) 
.setWaitForEvents(Priority.LANGUID) .setWaitForNoRelocatingShards(true) .setTimeout(ACCEPTABLE_RELOCATION_TIME) @@ -670,7 +670,7 @@ public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Except private void assertActiveCopiesEstablishedPeerRecoveryRetentionLeases() throws Exception { assertBusy(() -> { - for (String index : clusterAdmin().prepareState().get().getState().metadata().indices().keySet()) { + for (String index : clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().indices().keySet()) { Map<ShardId, List<ShardStats>> byShardId = Stream.of(indicesAdmin().prepareStats(index).get().getShards()) .collect(Collectors.groupingBy(l -> l.getShardRouting().shardId())); for (List<ShardStats> shardStats : byShardId.values()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java index 0b56eb36c08e..a1aecab66bbf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RestartInactiveAutoExpandReplicaNotStaleIT.java @@ -28,7 +28,7 @@ public void testNotStale() throws Exception { ensureGreen(); - ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().get(); + ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get(); IndexMetadata target = clusterStateResponse.getState().getMetadata().index("test"); internalCluster().restartNode(replica, new InternalTestCluster.RestartCallback() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index 6c7bcd17af1f..1e3a7d5a6b81 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -107,6 +108,8 @@ public void testGetShardSnapshotOnEmptyRepositoriesListThrowsAnError() { expectThrows(IllegalArgumentException.class, () -> getLatestSnapshotForShardFuture(Collections.emptyList(), "idx", 0, false)); } + @UpdateForV9 + // below we were selecting an index version between 7.5.0 and current; this has been updated to start from 8.0.0 (MINIMUM_COMPATIBLE), but that might need to change again public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exception { final String repoName = "repo-name"; final Path repoPath = randomRepoPath(); @@ -114,7 +117,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final boolean useBwCFormat = randomBoolean(); if (useBwCFormat) { - final IndexVersion version = randomVersionBetween(random(), IndexVersions.V_7_5_0, IndexVersion.current()); + final IndexVersion version = randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersion.current()); initWithSnapshotVersion(repoName, repoPath, version); } @@ -137,7 +140,7 @@ public void
testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final SnapshotInfo snapshotInfo = createSnapshot(repoName, Strings.format("snap-%03d", i), snapshotIndices); if (snapshotInfo.indices().contains(indexName)) { lastSnapshot = snapshotInfo; - ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().get(); + ClusterStateResponse clusterStateResponse = admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get(); IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(indexName); expectedIndexMetadataId = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetadata); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java index 4665dc486a90..702eba9b2bfb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreCorruptionIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.repositories.blobstore; +import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -22,9 +23,11 @@ import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.MockLog; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.junit.Before; +import java.nio.file.Files; import java.util.ArrayList; import java.util.List; @@ -65,17 +68,51 @@ public void testCorruptionDetection() throws Exception { // detect corruption by taking another snapshot if (corruptedFileType == RepositoryFileType.SHARD_GENERATION) { - corruptionDetectors.add(exceptionListener -> { - logger.info("--> taking another snapshot"); - client().admin() - .cluster() - .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, randomIdentifier()) - .setWaitForCompletion(true) - .execute(exceptionListener.map(createSnapshotResponse -> { - assertNotEquals(SnapshotState.SUCCESS, createSnapshotResponse.getSnapshotInfo().state()); - return new ElasticsearchException("create-snapshot failed as expected"); - })); - }); + if (Files.exists(corruptedFile)) { + corruptionDetectors.add(exceptionListener -> { + logger.info("--> taking another snapshot"); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, randomIdentifier()) + .setWaitForCompletion(true) + .execute(exceptionListener.map(createSnapshotResponse -> { + assertNotEquals(SnapshotState.SUCCESS, createSnapshotResponse.getSnapshotInfo().state()); + return new ElasticsearchException("create-snapshot failed as expected"); + })); + }); + } else { + corruptionDetectors.add(exceptionListener -> { + logger.info("--> taking another snapshot"); + final var mockLog = MockLog.capture(BlobStoreRepository.class); + mockLog.addExpectation( + new MockLog.SeenEventExpectation( + "fallback message", + "org.elasticsearch.repositories.blobstore.BlobStoreRepository", + Level.ERROR, + "index [*] shard generation [*] in [" + + repositoryName + + "][*] not found - falling back to reading all shard snapshots" + ) + ); + mockLog.addExpectation( + new MockLog.SeenEventExpectation( + "shard blobs list", + 
"org.elasticsearch.repositories.blobstore.BlobStoreRepository", + Level.ERROR, + "read shard snapshots [*] due to missing shard generation [*] for index [*] in [" + repositoryName + "][*]" + ) + ); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, randomIdentifier()) + .setWaitForCompletion(true) + .execute(ActionListener.releaseAfter(exceptionListener.map(createSnapshotResponse -> { + assertEquals(SnapshotState.SUCCESS, createSnapshotResponse.getSnapshotInfo().state()); + mockLog.assertAllExpectationsMatched(); + return new ElasticsearchException("create-snapshot logged errors as expected"); + }), mockLog)); + }); + } } // detect corruption by restoring the snapshot diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 01b01fdf5fcd..b8ada92c9033 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomNonDataPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.hamcrest.Matchers.containsString; @@ -65,7 +66,10 @@ public void testRepeatCleanupsDontRemove() throws Exception { ); logger.info("--> ensure cleanup is still in progress"); - final RepositoryCleanupInProgress cleanup = clusterAdmin().prepareState().get().getState().custom(RepositoryCleanupInProgress.TYPE); + final RepositoryCleanupInProgress cleanup = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .custom(RepositoryCleanupInProgress.TYPE); assertTrue(cleanup.hasCleanupInProgress()); logger.info("--> unblocking master node"); @@ -151,12 +155,7 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti createOldIndexNFuture, () -> repository.blobStore() .blobContainer(repository.basePath()) - .writeBlob( - randomNonDataPurpose(), - BlobStoreRepository.INDEX_FILE_PREFIX + generation, - new BytesArray(new byte[1]), - true - ) + .writeBlob(randomNonDataPurpose(), getRepositoryDataBlobName(generation), new BytesArray(new byte[1]), true) ) ); createOldIndexNFuture.get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java index 4ce92610eff1..82b6ba15930b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java @@ -357,7 +357,10 @@ public class ComponentTemplatesFileSettingsIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) throws ExecutionException, InterruptedException { - assertThat(client.admin().cluster().prepareState().execute().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + 
client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } private void writeJSONFile(String node, String json) throws Exception { @@ -403,7 +406,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); Map allTemplates = clusterStateResponse.getState().metadata().templatesV2(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index 2fe808d813cc..049a58b63355 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -102,7 +102,10 @@ public class FileSettingsServiceIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) { - assertThat(client.admin().cluster().prepareState().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } public static void writeJSONFile(String node, String json, AtomicLong versionCounter, Logger logger) throws Exception { @@ -169,7 +172,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo assertTrue(savedClusterState.await(20, TimeUnit.SECONDS)); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); assertThat( @@ -177,7 +180,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo equalTo(expectedBytesPerSec) ); - ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).persistentSettings( Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb") ); assertThat( @@ -257,7 +260,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { logger.info("--> restart master"); internalCluster().restartNode(masterNode); - final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest()).actionGet(); + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertThat( clusterStateResponse.getState() .metadata() @@ -300,7 +303,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic assertTrue(awaitSuccessful); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get()) ).actionGet(); assertThat(clusterStateResponse.getState().metadata().persistentSettings().get("search.allow_expensive_queries"), 
nullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index 1ca2526b53df..7cec6e895a52 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -94,7 +94,10 @@ public class RepositoriesFileSettingsIT extends ESIntegTestCase { }"""; private void assertMasterNode(Client client, String node) throws ExecutionException, InterruptedException { - assertThat(client.admin().cluster().prepareState().execute().get().getState().nodes().getMasterNode().getName(), equalTo(node)); + assertThat( + client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).execute().get().getState().nodes().getMasterNode().getName(), + equalTo(node) + ); } private void writeJSONFile(String node, String json) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java index 049260e14100..087274a86221 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java @@ -133,7 +133,7 @@ private ClusterStateResponse assertClusterStateSaveOK(CountDownLatch savedCluste boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - return clusterAdmin().state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())).get(); + return clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(metadataVersion.get())).get(); } public void testRestoreWithRemovedFileSettings() throws Exception { @@ -180,14 +180,15 @@ public void testRestoreWithRemovedFileSettings() throws Exception { ensureGreen(); - final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().metadata(true)).actionGet(); + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest(TEST_REQUEST_TIMEOUT).metadata(true)) + .actionGet(); // We expect no reserved metadata state for file based settings, the operator file was deleted. 
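// (the file-settings reserved state is registered under FileSettingsService.NAMESPACE, which is what the lookup below checks)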
assertNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).actionGet(); assertThat( @@ -305,14 +306,14 @@ public void testRestoreWithPersistedFileSettings() throws Exception { logger.info("--> reserved state would be restored to non-zero version"); final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().metadata(true).waitForMetadataVersion(removedReservedState.v2().get()) + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).metadata(true).waitForMetadataVersion(removedReservedState.v2().get()) ).actionGet(); assertNotNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() + new ClusterGetSettingsAction.Request(TEST_REQUEST_TIMEOUT) ).actionGet(); assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index 502f02d9ce17..2ab77444f86b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -51,7 +51,12 @@ public void testRollingRestartOfTwoNodeCluster() throws Exception { ); ensureGreen("test"); - final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState().clear().setNodes(true).get().getState().nodes(); + final DiscoveryNodes discoveryNodes = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setNodes(true) + .get() + .getState() + .nodes(); final Map nodeIdsByName = Maps.newMapWithExpectedSize(discoveryNodes.getSize()); discoveryNodes.forEach(n -> nodeIdsByName.put(n.getName(), n.getId())); @@ -98,7 +103,7 @@ public Settings onNodeStopped(String nodeName) throws IOException { ClusterHealthResponse clusterHealthResponse = client(viaNode).admin() .cluster() - .prepareHealth() + .prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setWaitForNodes(Integer.toString(1)) .setTimeout(TimeValue.timeValueSeconds(30L)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index 53001e30763a..1e18f156f1fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -51,7 +51,7 @@ public void testSearchClosedWildcardIndex() throws ExecutionException, Interrupt public void testResolveIndexRouting() { createIndex("test1"); createIndex("test2"); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() .addAliasAction(AliasActions.add().index("test1").alias("alias")) @@ -93,7 +93,7 @@ public void testResolveSearchRouting() { createIndex("test1"); createIndex("test2"); createIndex("test3"); - 
clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); indicesAdmin().prepareAliases() .addAliasAction(AliasActions.add().index("test1").alias("alias")) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java index e25da54d7b21..20c197bf7389 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java @@ -93,8 +93,13 @@ public void testShrinking() throws Exception { Settings.builder() .put( "index.routing.allocation.require._name", - clusterAdmin().prepareState().get().getState().nodes().getDataNodes().values().toArray(DiscoveryNode[]::new)[0] - .getName() + clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT) + .get() + .getState() + .nodes() + .getDataNodes() + .values() + .toArray(DiscoveryNode[]::new)[0].getName() ) .put("index.blocks.write", true), index diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java index f59ec4d42089..2eb37291d41c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -49,7 +49,7 @@ protected int minimumNumberOfShards() { } public String findNonMatchingRoutingValue(String index, String id) { - ClusterState state = clusterAdmin().prepareState().all().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).all().get().getState(); IndexMetadata metadata = state.metadata().index(index); IndexMetadata withoutRoutingRequired = IndexMetadata.builder(metadata).putMapping("{}").build(); IndexRouting indexRouting = IndexRouting.fromIndexMetadata(withoutRoutingRequired); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java index 5625299890b7..b71dd4a39b19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchServiceCleanupOnLostMasterIT.java @@ -56,7 +56,7 @@ public void testDroppedOutNode() throws Exception { assertBusy(() -> { final ClusterHealthStatus indexHealthStatus = client(master).admin() .cluster() - .health(new ClusterHealthRequest("test")) + .health(new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test")) .actionGet() .getStatus(); assertThat(indexHealthStatus, Matchers.is(ClusterHealthStatus.RED)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 0ed83f73e418..17b976bdd374 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -223,7 +223,7 @@ public void testSimpleWithCustomKeys() throws Exception { } public void 
testUnmapped() throws Exception { - clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx_unmapped").setWaitForYellowStatus().get(); assertNoFailuresAndResponse( prepareSearch("idx_unmapped").addAggregation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index 6a60969e632e..0c39859856d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -761,7 +761,7 @@ public void testUnmapped() throws Exception { } public void testPartiallyUnmapped() throws Exception { - clusterAdmin().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "idx_unmapped").setWaitForYellowStatus().get(); assertNoFailuresAndResponse( prepareSearch("idx", "idx_unmapped").addAggregation( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java index 152549617641..9e2139f832f1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java @@ -99,7 +99,9 @@ private void setClusterDefaultAllowPartialResults(boolean allowPartialResults) { Settings persistentSettings = Settings.builder().put(key, allowPartialResults).build(); - ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings().setPersistentSettings(persistentSettings).get(); + ClusterUpdateSettingsResponse response1 = clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(persistentSettings) + .get(); assertAcked(response1); assertEquals(response1.getPersistentSettings().getAsBoolean(key, null), allowPartialResults); @@ -115,10 +117,10 @@ private void buildRedIndex(int numShards) throws Exception { internalCluster().stopRandomDataNode(); - clusterAdmin().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForStatus(ClusterHealthStatus.RED).get(); assertBusy(() -> { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); List unassigneds = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.UNASSIGNED); assertThat(unassigneds.size(), greaterThan(0)); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index 68d00321848e..df6994c57f42 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -61,7 +61,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) logger.info("using preference {}", preference); // we want to make sure that while recovery happens, and a replica gets recovered, its properly refreshed - 
ClusterHealthStatus status = clusterAdmin().prepareHealth("test").get().getStatus(); + ClusterHealthStatus status = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").get().getStatus(); while (status != ClusterHealthStatus.GREEN) { // first, verify that search normal search works assertHitCount(prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "test")), 1); @@ -97,7 +97,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) assertHitCount(searchResponse, 1); } ); - status = clusterAdmin().prepareHealth("test").get().getStatus(); + status = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test").get().getStatus(); internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1); } cluster().wipeIndices("test"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 657158327bf0..a9b0f75fe45b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -125,7 +125,7 @@ public void run() { threads[j].join(); } // this might time out on some machines if they are really busy and you hit lots of throttling - ClusterHealthResponse resp = clusterAdmin().prepareHealth() + ClusterHealthResponse resp = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForYellowStatus() .setWaitForNoRelocatingShards(true) .setWaitForEvents(Priority.LANGUID) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 096f533a072b..3fe93f8d91be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -102,7 +102,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc ClusterHealthResponse clusterHealthResponse = clusterAdmin() // it's OK to timeout here .health( - new ClusterHealthRequest(new String[] {}).waitForYellowStatus() + new ClusterHealthRequest(TEST_REQUEST_TIMEOUT, new String[] {}).waitForYellowStatus() .masterNodeTimeout(TimeValue.timeValueSeconds(5)) .timeout(TimeValue.timeValueSeconds(5)) ) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index 303030a52366..951ea29a09e8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -74,13 +74,13 @@ public void testFailedSearchWithWrongQuery() throws Exception { allowNodes("test", 2); assertThat( - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").get().isTimedOut(), + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").get().isTimedOut(), equalTo(false) ); logger.info("Running Cluster Health"); ClusterHealthResponse clusterHealth = clusterAdmin().health( - new ClusterHealthRequest("test").waitForYellowStatus() + new 
ClusterHealthRequest(TEST_REQUEST_TIMEOUT, "test").waitForYellowStatus() .waitForNoRelocatingShards(true) .waitForEvents(Priority.LANGUID) .waitForActiveShards(test.totalNumShards) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java index 8b7f69df9fcc..23146d190725 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CCSUsageTelemetryIT.java @@ -670,7 +670,7 @@ private Map setupClusters() { assertFalse( client(clusterAlias).admin() .cluster() - .prepareHealth(remoteIndex) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index e96689ce2846..9cc359f40d32 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -224,7 +224,7 @@ public void testCancel() throws Exception { assertFalse( client("cluster_a").admin() .cluster() - .prepareHealth("prod") + .prepareHealth(TEST_REQUEST_TIMEOUT, "prod") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 89bc0e83351a..e772f94e868a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -701,7 +701,7 @@ private Map setupTwoClusters() { assertFalse( client(REMOTE_CLUSTER).admin() .cluster() - .prepareHealth(remoteIndex) + .prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java index a9ae215c1ab7..5c26899f2e3f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchLeakIT.java @@ -110,7 +110,7 @@ public void testSearch() throws Exception { assertFalse( client("cluster_a").admin() .cluster() - .prepareHealth("prod") + .prepareHealth(TEST_REQUEST_TIMEOUT, "prod") .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() @@ -169,7 +169,11 @@ protected void configureRemoteCluster(String clusterAlias, Collection se settings.put("cluster.remote." + clusterAlias + ".mode", "proxy"); settings.put("cluster.remote." 
+ clusterAlias + ".proxy_address", seedAddress); - client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + client().admin() + .cluster() + .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(settings) + .get(); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index cc272042d538..c89c7d8d749f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -11,7 +11,6 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.logging.log4j.Level; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils; import org.elasticsearch.action.fieldcaps.FieldCapabilities; @@ -38,8 +37,6 @@ import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.SourceLoader; -import org.elasticsearch.index.mapper.StringStoredFieldFieldLoader; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -915,17 +912,6 @@ protected String contentType() { return CONTENT_TYPE; } - @Override - public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - return new StringStoredFieldFieldLoader(fullPath(), leafName()) { - @Override - protected void write(XContentBuilder b, Object value) throws IOException { - BytesRef ref = (BytesRef) value; - b.utf8Value(ref.bytes, ref.offset, ref.length); - } - }; - } - private static final TypeParser PARSER = new FixedTypeParser(c -> new TestMetadataMapper()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index d42a84677a8f..d58e777b093a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -65,7 +65,7 @@ public void testPlugin() throws Exception { .endObject() ) .get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().get(); client().index( new IndexRequest("test").id("1") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java index 8c65d28711c1..32dc34045cc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/MinimalCompoundRetrieverIT.java @@ -106,7 +106,7 @@ private Map setupTwoClusters() { assertFalse( client(REMOTE_CLUSTER).admin() .cluster() - .prepareHealth(remoteIndex) + 
.prepareHealth(TEST_REQUEST_TIMEOUT, remoteIndex) .setWaitForYellowStatus() .setTimeout(TimeValue.timeValueSeconds(10)) .get() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java index e618a1b75cc4..e6ecd9f1e377 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/retriever/RetrieverRewriteIT.java @@ -115,7 +115,7 @@ public void testRewriteCompoundRetrieverShouldThrowForPartialResults() throws Ex throw new IllegalStateException("node did not stop"); } assertBusy(() -> { - ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(testIndex) + ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, testIndex) .setWaitForStatus(ClusterHealthStatus.RED) // we are now known red because the primary shard is missing .setWaitForEvents(Priority.LANGUID) // ensures that the update has occurred .execute() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java index 433f004acdd7..17a0d6441ca4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchPreferenceIT.java @@ -59,7 +59,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException { } refresh(); internalCluster().stopRandomDataNode(); - clusterAdmin().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForStatus(ClusterHealthStatus.RED).get(); String[] preferences = new String[] { "_local", "_prefer_nodes:somenode", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java index 816fe48e5d97..439534c3e174 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/routing/SearchReplicaSelectionIT.java @@ -68,7 +68,7 @@ public void testNodeSelection() { client.prepareSearch().setQuery(matchAllQuery()).get().decRef(); } - ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().get(); + ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get(); Map coordinatingNodes = clusterStateResponse.getState().nodes().getCoordinatingOnlyNodes(); assertEquals(1, coordinatingNodes.size()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index 03c217266d52..24a3d3ac422f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -70,9 +70,9 @@ public void cleanup() throws Exception { public void testSimpleScrollQueryThenFetch() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - 
clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); @@ -119,9 +119,9 @@ public void testSimpleScrollQueryThenFetch() throws Exception { public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { String routing = "0"; @@ -189,7 +189,7 @@ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws E public void testScrollAndUpdateIndex() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 5)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 500; i++) { prepareIndex("test").setId(Integer.toString(i)) @@ -241,9 +241,9 @@ public void testScrollAndUpdateIndex() throws Exception { public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); @@ -360,9 +360,9 @@ public void testClearIllegalScrollId() throws Exception { public void testSimpleScrollQueryThenFetchClearAllScrollIds() throws Exception { indicesAdmin().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 3)).get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); for (int i = 0; i < 100; i++) { 
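// seed 100 documents into the 3-shard index so the scroll has multiple pages to work through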
prepareIndex("test").setId(Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).get(); @@ -553,7 +553,7 @@ public void testCloseAndReopenOrDeleteWithActiveScroll() { public void testScrollInvalidDefaultKeepAlive() throws IOException { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "1m").put("search.default_keep_alive", "2m")) ); assertThat(exc.getMessage(), containsString("was (2m > 1m)")); @@ -564,7 +564,8 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { exc = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) ); assertThat(exc.getMessage(), containsString("was (3m > 2m)")); @@ -572,7 +573,8 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException { exc = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")) + clusterAdmin().prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT) + .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")) ); assertThat(exc.getMessage(), containsString("was (1m > 30s)")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java index 23384d1b199f..e5ca2c6968bb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -156,7 +156,7 @@ public void testSimpleStats() throws Exception { } private Set nodeIdsWithIndex(String... indices) { - ClusterState state = clusterAdmin().prepareState().get().getState(); + ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true); Set nodes = new HashSet<>(); for (ShardIterator shardIterator : allAssignedShardsGrouped) { @@ -239,7 +239,7 @@ public void testOpenContexts() { } protected int numAssignedShards(String... 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java
index 23384d1b199f..e5ca2c6968bb 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java
@@ -156,7 +156,7 @@ public void testSimpleStats() throws Exception {
     }
 
     private Set<String> nodeIdsWithIndex(String... indices) {
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         GroupShardsIterator<ShardIterator> allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
         Set<String> nodes = new HashSet<>();
         for (ShardIterator shardIterator : allAssignedShardsGrouped) {
@@ -239,7 +239,7 @@ public void testOpenContexts() {
     }
 
     protected int numAssignedShards(String... indices) {
-        ClusterState state = clusterAdmin().prepareState().get().getState();
+        ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         GroupShardsIterator<ShardIterator> allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
         return allAssignedShardsGrouped.size();
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
index 71616abf0dcf..5bdf156e3999 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java
@@ -62,6 +62,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
@@ -153,7 +154,10 @@ public void testRecreateCorruptedRepositoryDuringSnapshotsFails() throws Excepti
         Settings repoSettings = getRepositoryMetadata(repoName).settings();
         Path repo = PathUtils.get(repoSettings.get("location"));
 
-        Files.move(repo.resolve("index-" + repositoryData.getGenId()), repo.resolve("index-" + (repositoryData.getGenId() + 1)));
+        Files.move(
+            repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId())),
+            repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId() + 1))
+        );
 
         logger.info("--> trying to create another snapshot in order for repository to be marked as corrupt");
         final SnapshotException snapshotException = expectThrows(
@@ -967,7 +971,7 @@ public void testQueuedSnapshotsWaitingForShardReady() throws Exception {
 
         logger.info("--> wait for relocations to start");
         assertBusy(
-            () -> assertThat(clusterAdmin().prepareHealth(testIndex).get().getRelocatingShards(), greaterThan(0)),
+            () -> assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, testIndex).get().getRelocatingShards(), greaterThan(0)),
             1L,
             TimeUnit.MINUTES
         );
@@ -2309,7 +2313,7 @@ private static boolean snapshotHasCompletedShard(String repoName, String snapsho
 
     private void corruptIndexN(Path repoPath, long generation) throws IOException {
         logger.info("--> corrupting [index-{}] in [{}]", generation, repoPath);
-        Path indexNBlob = repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + generation);
+        Path indexNBlob = repoPath.resolve(getRepositoryDataBlobName(generation));
         assertFileExists(indexNBlob);
         Files.write(indexNBlob, randomByteArrayOfLength(1), StandardOpenOption.TRUNCATE_EXISTING);
     }
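
Here and in the following files, hand-built blob names such as "index-" + generation, "snap-" + uuid + ".dat" and "meta-" + uuid + ".dat" are replaced by the getRepositoryDataBlobName helper and the *_NAME_FORMAT constants on BlobStoreRepository. Judging purely by the call sites this patch rewrites, the helper and formats are equivalent to the sketch below; the literal values are inferences from the removed code, not the actual constant definitions:

    // Illustrative stand-ins only; the real members live on BlobStoreRepository.
    final class BlobNameSketch {
        static final String INDEX_FILE_PREFIX = "index-";          // literal removed above
        static final String SNAPSHOT_INDEX_NAME_FORMAT = "index-%s"; // shard-level index-N blobs
        static final String SNAPSHOT_NAME_FORMAT = "snap-%s.dat";    // per-snapshot blobs
        static final String METADATA_NAME_FORMAT = "meta-%s.dat";    // global/index metadata blobs

        static String getRepositoryDataBlobName(long repositoryGeneration) {
            // Evident equivalent of the removed "index-" + generation concatenation.
            return INDEX_FILE_PREFIX + repositoryGeneration;
        }

        public static void main(String[] args) {
            System.out.println(getRepositoryDataBlobName(42));                 // index-42
            System.out.println(String.format(SNAPSHOT_NAME_FORMAT, "uuid"));   // snap-uuid.dat
            System.out.println(String.format(METADATA_NAME_FORMAT, "uuid"));   // meta-uuid.dat
        }
    }

Note that production code reaches the same result via org.elasticsearch.core.Strings.format, as the hunks below show.
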
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java
index abcac0cade45..5a82b4b1ab99 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java
@@ -7,6 +7,7 @@
  */
 package org.elasticsearch.snapshots;
 
+import org.apache.logging.log4j.Level;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
@@ -14,8 +15,10 @@
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -23,6 +26,7 @@
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.Strings;
 import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshotsIntegritySuppressor;
 import org.elasticsearch.repositories.IndexId;
 import org.elasticsearch.repositories.IndexMetaDataGenerations;
 import org.elasticsearch.repositories.Repository;
@@ -32,6 +36,8 @@
 import org.elasticsearch.repositories.ShardGenerations;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 import org.elasticsearch.repositories.fs.FsRepository;
+import org.elasticsearch.test.ClusterServiceUtils;
+import org.elasticsearch.test.MockLog;
 import org.elasticsearch.xcontent.XContentFactory;
 
 import java.nio.channels.SeekableByteChannel;
@@ -45,9 +51,14 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.METADATA_NAME_FORMAT;
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.SNAPSHOT_INDEX_NAME_FORMAT;
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.SNAPSHOT_NAME_FORMAT;
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasSize;
@@ -80,7 +91,10 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception {
 
         logger.info("--> move index-N blob to next generation");
         final RepositoryData repositoryData = getRepositoryData(repoName);
-        Files.move(repo.resolve("index-" + repositoryData.getGenId()), repo.resolve("index-" + (repositoryData.getGenId() + 1)));
+        Files.move(
+            repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId())),
+            repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId() + 1))
+        );
 
         assertRepositoryBlocked(repoName, snapshot);
 
@@ -133,13 +147,19 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception {
 
         logger.info("--> move index-N blob to next generation");
         final RepositoryData repositoryData = getRepositoryData(repoName);
-        Files.move(repo.resolve("index-" + repositoryData.getGenId()), repo.resolve("index-" + (repositoryData.getGenId() + 1)));
+        Files.move(
+            repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId())),
+            repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId() + 1))
+        );
 
         assertRepositoryBlocked(repoName, snapshot);
 
         if (randomBoolean()) {
             logger.info("--> move index-N blob back to initial generation");
-            Files.move(repo.resolve("index-" + (repositoryData.getGenId() + 1)), repo.resolve("index-" + repositoryData.getGenId()));
+            Files.move(
+                repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId() + 1)),
+                repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId()))
+            );
 
             logger.info("--> verify repository remains blocked");
             assertRepositoryBlocked(repoName, snapshot);
@@ -205,7 +225,7 @@ public void testFindDanglingLatestGeneration() throws Exception {
         logger.info("--> move index-N blob to next generation");
         final RepositoryData repositoryData = getRepositoryData(repoName);
         final long beforeMoveGen = repositoryData.getGenId();
-        Files.move(repo.resolve("index-" + beforeMoveGen), repo.resolve("index-" + (beforeMoveGen + 1)));
+        Files.move(repo.resolve(getRepositoryDataBlobName(beforeMoveGen)), repo.resolve(getRepositoryDataBlobName(beforeMoveGen + 1)));
 
         logger.info("--> set next generation as pending in the cluster state");
         updateClusterState(
@@ -298,7 +318,7 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception {
         ); // old-format repository has no cluster UUID
 
         Files.write(
-            repo.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + withoutVersions.getGenId()),
+            repo.resolve(getRepositoryDataBlobName(withoutVersions.getGenId())),
             BytesReference.toBytes(
                 BytesReference.bytes(withoutVersions.snapshotsToXContent(XContentFactory.jsonBuilder(), IndexVersion.current(), true))
             ),
@@ -358,7 +378,7 @@ public void testMountCorruptedRepositoryData() throws Exception {
         logger.info("--> corrupt index-N blob");
         final Repository repository = getRepositoryOnMaster(repoName);
         final RepositoryData repositoryData = getRepositoryData(repoName);
-        Files.write(repo.resolve("index-" + repositoryData.getGenId()), randomByteArrayOfLength(randomIntBetween(1, 100)));
+        Files.write(repo.resolve(getRepositoryDataBlobName(repositoryData.getGenId())), randomByteArrayOfLength(randomIntBetween(1, 100)));
 
         logger.info("--> verify loading repository data throws RepositoryException");
         asInstanceOf(
@@ -398,9 +418,9 @@ public void testHandleSnapshotErrorWithBwCFormat() throws Exception {
         logger.info("--> move shard level metadata to new generation");
         final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName);
         final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0");
-        final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0");
+        final Path initialShardMetaPath = shardPath.resolve(Strings.format(SNAPSHOT_INDEX_NAME_FORMAT, "0"));
         assertFileExists(initialShardMetaPath);
-        Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "1"));
+        Files.move(initialShardMetaPath, shardPath.resolve(Strings.format(SNAPSHOT_INDEX_NAME_FORMAT, "1")));
 
         startDeleteSnapshot(repoName, oldVersionSnapshot).get();
 
@@ -423,9 +443,9 @@ public void testRepairBrokenShardGenerations() throws Exception {
         logger.info("--> move shard level metadata to new generation and make RepositoryData point at an older generation");
         final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName);
         final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0");
-        final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0");
+        final Path initialShardMetaPath = shardPath.resolve(Strings.format(SNAPSHOT_INDEX_NAME_FORMAT, "0"));
         assertFileExists(initialShardMetaPath);
-        Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + randomIntBetween(1, 1000)));
+        Files.move(initialShardMetaPath, shardPath.resolve(Strings.format(SNAPSHOT_INDEX_NAME_FORMAT, randomIntBetween(1, 1000))));
 
         final RepositoryData repositoryData = getRepositoryData(repoName);
         final Map<String, SnapshotId> snapshotIds = repositoryData.getSnapshotIds()
@@ -442,7 +462,7 @@ public void testRepairBrokenShardGenerations() throws Exception {
             repositoryData.getClusterUUID()
         );
         Files.write(
-            repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()),
+            repoPath.resolve(getRepositoryDataBlobName(repositoryData.getGenId())),
             BytesReference.toBytes(
                 BytesReference.bytes(brokenRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), IndexVersion.current()))
             ),
@@ -491,7 +511,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception {
         final Path shardIndexFile = repo.resolve("indices")
             .resolve(corruptedIndex.getId())
             .resolve("0")
-            .resolve("index-" + repositoryData.shardGenerations().getShardGen(corruptedIndex, 0));
+            .resolve(Strings.format(SNAPSHOT_INDEX_NAME_FORMAT, repositoryData.shardGenerations().getShardGen(corruptedIndex, 0)));
 
         logger.info("--> truncating shard index file [{}]", shardIndexFile);
         try (SeekableByteChannel outChan = Files.newByteChannel(shardIndexFile, StandardOpenOption.WRITE)) {
@@ -564,10 +584,15 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio
             Path shardZero = indicesPath.resolve(indexIds.get(index).getId()).resolve("0");
             if (randomBoolean()) {
                 Files.delete(
-                    shardZero.resolve("index-" + getRepositoryData("test-repo").shardGenerations().getShardGen(indexIds.get(index), 0))
+                    shardZero.resolve(
+                        Strings.format(
+                            BlobStoreRepository.SNAPSHOT_INDEX_NAME_FORMAT,
+                            getRepositoryData("test-repo").shardGenerations().getShardGen(indexIds.get(index), 0)
+                        )
+                    )
                 );
             }
-            Files.delete(shardZero.resolve("snap-" + snapshotInfo.snapshotId().getUUID() + ".dat"));
+            Files.delete(shardZero.resolve(Strings.format(SNAPSHOT_NAME_FORMAT, snapshotInfo.snapshotId().getUUID())));
         }
 
         startDeleteSnapshot("test-repo", "test-snap-1").get();
@@ -608,7 +633,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception {
         );
 
         logger.info("--> delete global state metadata");
-        Path metadata = repo.resolve("meta-" + createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID() + ".dat");
+        Path metadata = repo.resolve(Strings.format(METADATA_NAME_FORMAT, createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID()));
         Files.delete(metadata);
 
         startDeleteSnapshot("test-repo", "test-snap-1").get();
@@ -651,7 +676,9 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception {
         );
 
         logger.info("--> truncate snapshot file to make it unreadable");
-        Path snapshotPath = repo.resolve("snap-" + createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID() + ".dat");
+        Path snapshotPath = repo.resolve(
+            Strings.format(SNAPSHOT_NAME_FORMAT, createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID())
+        );
         try (SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) {
             outChan.truncate(randomInt(10));
         }
@@ -698,7 +725,7 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception {
 
         SnapshotInfo snapshotInfo = createFullSnapshot("test-repo", "test-snap");
 
-        final Path globalStatePath = repo.resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat");
+        final Path globalStatePath = repo.resolve(Strings.format(METADATA_NAME_FORMAT, snapshotInfo.snapshotId().getUUID()));
         if (randomBoolean()) {
             // Delete the global state metadata file
             IOUtils.deleteFilesIgnoringExceptions(globalStatePath);
@@ -747,41 +774,131 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception {
             .setWaitForCompletion(true)
             .setIndices("test-idx-*")
             .get();
 
+        final boolean repairWithDelete = randomBoolean();
+        if (repairWithDelete || randomBoolean()) {
+            clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "snap-for-deletion")
+                .setWaitForCompletion(true)
+                .setIndices("test-idx-1")
+                .get();
+        }
+
         logger.info("--> deleting shard level index file");
         final Path indicesPath = repo.resolve("indices");
         for (IndexId indexId : getRepositoryData("test-repo").getIndices().values()) {
             final Path shardGen;
             try (Stream<Path> shardFiles = Files.list(indicesPath.resolve(indexId.getId()).resolve("0"))) {
-                shardGen = shardFiles.filter(file -> file.getFileName().toString().startsWith(BlobStoreRepository.INDEX_FILE_PREFIX))
+                shardGen = shardFiles.filter(file -> file.getFileName().toString().startsWith(BlobStoreRepository.SNAPSHOT_INDEX_PREFIX))
                    .findFirst()
                    .orElseThrow(() -> new AssertionError("Failed to find shard index blob"));
             }
             Files.delete(shardGen);
         }
 
-        logger.info("--> creating another snapshot");
+        if (randomBoolean()) {
+            logger.info("""
+                --> restoring the snapshot, the repository should not have lost any shard data despite deleting index-*, \
+                because it uses snap-*.dat files and not the index-* to determine what files to restore""");
+            indicesAdmin().prepareDelete("test-idx-1", "test-idx-2").get();
+            RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
+                TEST_REQUEST_TIMEOUT,
+                "test-repo",
+                "test-snap-1"
+            ).setWaitForCompletion(true).get();
+            assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards());
+            ensureGreen("test-idx-1", "test-idx-2");
+        }
+
+        logger.info("--> creating another snapshot, which should re-create the missing file");
+        try (
+            var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor();
+            var mockLog = MockLog.capture(BlobStoreRepository.class)
+        ) {
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
+                    "fallback message",
+                    "org.elasticsearch.repositories.blobstore.BlobStoreRepository",
+                    Level.ERROR,
+                    "index [test-idx-1/*] shard generation [*] in [test-repo][*] not found - falling back to reading all shard snapshots"
+                )
+            );
+            mockLog.addExpectation(
+                new MockLog.SeenEventExpectation(
+                    "shard blobs list",
+                    "org.elasticsearch.repositories.blobstore.BlobStoreRepository",
+                    Level.ERROR,
+                    "read shard snapshots [*] due to missing shard generation [*] for index [test-idx-1/*] in [test-repo][*]"
+                )
+            );
+            if (repairWithDelete) {
+                clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "snap-for-deletion").get();
+            } else if (randomBoolean()) {
+                CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(
+                    TEST_REQUEST_TIMEOUT,
+                    "test-repo",
+                    "test-snap-2"
+                ).setWaitForCompletion(true).setIndices("test-idx-1").get();
+                assertEquals(
+                    createSnapshotResponse.getSnapshotInfo().totalShards(),
+                    createSnapshotResponse.getSnapshotInfo().successfulShards()
+                );
+            } else {
+                clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1", "test-snap-2")
+                    .setIndices("test-idx-1")
+                    .get();
+                safeAwait(
+                    ClusterServiceUtils.addTemporaryStateListener(
+                        internalCluster().getInstance(ClusterService.class),
+                        cs -> SnapshotsInProgress.get(cs).isEmpty()
+                    )
+                );
+                assertThat(
+                    clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo")
+                        .setSnapshots("test-snap-2")
+                        .get()
+                        .getSnapshots()
+                        .get(0)
+                        .shardFailures(),
+                    empty()
+                );
+            }
+            mockLog.assertAllExpectationsMatched();
+
+            try (
+                Stream<Path> shardFiles = Files.list(
+                    indicesPath.resolve(getRepositoryData("test-repo").resolveIndexId("test-idx-1").getId()).resolve("0")
+                )
+            ) {
+                assertTrue(shardFiles.anyMatch(file -> file.getFileName().toString().startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)));
+            }
+        }
+
+        if (randomBoolean()) {
+            indicesAdmin().prepareDelete("test-idx-1").get();
+            RestoreSnapshotResponse restoreSnapshotResponse2 = clusterAdmin().prepareRestoreSnapshot(
+                TEST_REQUEST_TIMEOUT,
+                "test-repo",
+                repairWithDelete ? "test-snap-1" : randomFrom("test-snap-1", "test-snap-2")
+            ).setIndices("test-idx-1").setWaitForCompletion(true).get();
+            assertEquals(0, restoreSnapshotResponse2.getRestoreInfo().failedShards());
+            ensureGreen("test-idx-1", "test-idx-2");
+        }
+
+        logger.info("--> creating another snapshot, which should succeed since the shard gen file now exists again");
         CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(
             TEST_REQUEST_TIMEOUT,
             "test-repo",
-            "test-snap-2"
+            "test-snap-3"
         ).setWaitForCompletion(true).setIndices("test-idx-1").get();
-        assertEquals(
-            createSnapshotResponse.getSnapshotInfo().successfulShards(),
-            createSnapshotResponse.getSnapshotInfo().totalShards() - 1
-        );
+        assertEquals(createSnapshotResponse.getSnapshotInfo().totalShards(), createSnapshotResponse.getSnapshotInfo().successfulShards());
 
-        logger.info(
-            "--> restoring the first snapshot, the repository should not have lost any shard data despite deleting index-N, "
-                + "because it uses snap-*.data files and not the index-N to determine what files to restore"
-        );
-        indicesAdmin().prepareDelete("test-idx-1", "test-idx-2").get();
-        RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(
+        indicesAdmin().prepareDelete("test-idx-1").get();
+        RestoreSnapshotResponse restoreSnapshotResponse3 = clusterAdmin().prepareRestoreSnapshot(
             TEST_REQUEST_TIMEOUT,
             "test-repo",
-            "test-snap-1"
-        ).setWaitForCompletion(true).get();
-        assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards());
+            repairWithDelete ? randomFrom("test-snap-1", "test-snap-3") : randomFrom("test-snap-1", "test-snap-2", "test-snap-3")
+        ).setIndices("test-idx-1").setWaitForCompletion(true).get();
+        assertEquals(0, restoreSnapshotResponse3.getRestoreInfo().failedShards());
+        ensureGreen("test-idx-1", "test-idx-2");
     }
 
     public void testDeletesWithUnexpectedIndexBlob() throws Exception {
nodes"); internalCluster().stopRandomDataNode(); assertThat( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(1)) .setWaitForNodes("<2") @@ -431,7 +432,7 @@ public boolean clearData(String nodeName) { }); assertThat( - clusterAdmin().prepareHealth() + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT) .setWaitForEvents(Priority.LANGUID) .setTimeout(TimeValue.timeValueMinutes(1)) .setWaitForNodes("2") @@ -665,7 +666,7 @@ public void testRestoreShrinkIndex() throws Exception { assertAcked(indicesAdmin().prepareDelete(sourceIdx).get()); assertAcked(indicesAdmin().prepareDelete(shrunkIdx).get()); internalCluster().stopRandomDataNode(); - clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("1"); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("1"); logger.info("--> start a new data node"); final Settings dataSettings = Settings.builder() @@ -673,7 +674,7 @@ public void testRestoreShrinkIndex() throws Exception { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) // to get a new node id .build(); internalCluster().startDataOnlyNode(dataSettings); - clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2"); + clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2"); logger.info("--> restore the shrunk index and ensure all shards are allocated"); RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) @@ -1126,7 +1127,7 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { logger.info("--> wait for relocations to start"); assertBusy( - () -> assertThat(clusterAdmin().prepareHealth(indexName).get().getRelocatingShards(), greaterThan(0)), + () -> assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, indexName).get().getRelocatingShards(), greaterThan(0)), 1L, TimeUnit.MINUTES ); @@ -1316,7 +1317,7 @@ private static List findRepoMetaBlobs(Path repoPath) throws IOException { List files = new ArrayList<>(); forEachFileRecursively(repoPath.resolve("indices"), ((file, basicFileAttributes) -> { final String fileName = file.getFileName().toString(); - if (fileName.startsWith(BlobStoreRepository.METADATA_PREFIX) && fileName.endsWith(".dat")) { + if (fileName.startsWith(BlobStoreRepository.METADATA_PREFIX) && fileName.endsWith(METADATA_BLOB_NAME_SUFFIX)) { files.add(file); } })); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 477fd9737394..6f02ac5c983a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -40,7 +40,6 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryMissingException; -import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESTestCase; @@ -63,6 +62,7 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +import static 
org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; @@ -1042,7 +1042,7 @@ private static void removeDetailsForRandomSnapshots(String repositoryName, Actio final var repositoryMetadata = repository.getMetadata(); final var repositorySettings = repositoryMetadata.settings(); final var repositoryDataBlobPath = asInstanceOf(FsBlobStore.class, repository.blobStore()).path() - .resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryMetadata.generation()); + .resolve(getRepositoryDataBlobName(repositoryMetadata.generation())); SubscribableListener diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index fc727007724d..2daa36ee00a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.IOUtils; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshotsIntegritySuppressor; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.test.ESIntegTestCase; @@ -98,7 +99,7 @@ protected Collection> nodePlugins() { return CollectionUtils.appendToCopy(super.nodePlugins(), getTestTransportPlugin()); } - public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { + public void testConcurrentDeleteFromOtherCluster() { internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNode(); final String repoNameOnFirstCluster = "test-repo"; @@ -125,10 +126,13 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-1").get(); secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-2").get(); - final SnapshotException sne = expectThrows( - SnapshotException.class, - clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) - ); + final SnapshotException sne; + try (var ignored = new BlobStoreIndexShardSnapshotsIntegritySuppressor()) { + sne = expectThrows( + SnapshotException.class, + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) + ); + } assertThat(sne.getMessage(), containsString("failed to update snapshot in repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); assertThat( @@ -138,7 +142,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { + repoNameOnFirstCluster + "] concurrent modification of the index-N file, expected current generation [2] but it was not found in " + "the repository. 
The last cluster to write to this repository was [" - + secondCluster.client().admin().cluster().prepareState().get().getState().metadata().clusterUUID() + + secondCluster.client().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata().clusterUUID() + "] at generation [4]." ) ); @@ -147,7 +151,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { createFullSnapshot(repoNameOnFirstCluster, "snap-5"); } - public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedException, IOException { + public void testConcurrentWipeAndRecreateFromOtherCluster() throws IOException { internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index d4c0a4c80a3b..6870a1d6b264 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -75,7 +75,12 @@ public void testRepositoryCreation() throws Exception { assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles)); logger.info("--> check that repository is really there"); - ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get(); + ClusterStateResponse clusterStateResponse = client.admin() + .cluster() + .prepareState(TEST_REQUEST_TIMEOUT) + .clear() + .setMetadata(true) + .get(); Metadata metadata = clusterStateResponse.getState().getMetadata(); RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); @@ -86,7 +91,7 @@ public void testRepositoryCreation() throws Exception { createRepository("test-repo-2", "fs"); logger.info("--> check that both repositories are in cluster state"); - clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get(); + clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get(); metadata = clusterStateResponse.getState().getMetadata(); repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); @@ -117,7 +122,7 @@ public void testRepositoryCreation() throws Exception { .isAcknowledged(), equalTo(true) ); - assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); + assertEquals(beforeStateUuid, client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).clear().get().getState().stateUUID()); logger.info("--> delete repository test-repo-1"); client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java index 1a54df1f85ed..8a0b74242ba9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java @@ -22,6 +22,7 @@ import static org.elasticsearch.health.HealthStatus.GREEN; import 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java
index d4c0a4c80a3b..6870a1d6b264 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java
@@ -75,7 +75,12 @@ public void testRepositoryCreation() throws Exception {
         assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles));
 
         logger.info("--> check that repository is really there");
-        ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get();
+        ClusterStateResponse clusterStateResponse = client.admin()
+            .cluster()
+            .prepareState(TEST_REQUEST_TIMEOUT)
+            .clear()
+            .setMetadata(true)
+            .get();
         Metadata metadata = clusterStateResponse.getState().getMetadata();
         RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE);
         assertThat(repositoriesMetadata, notNullValue());
@@ -86,7 +91,7 @@ public void testRepositoryCreation() throws Exception {
         createRepository("test-repo-2", "fs");
 
         logger.info("--> check that both repositories are in cluster state");
-        clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get();
+        clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get();
         metadata = clusterStateResponse.getState().getMetadata();
         repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE);
         assertThat(repositoriesMetadata, notNullValue());
@@ -117,7 +122,7 @@ public void testRepositoryCreation() throws Exception {
                 .isAcknowledged(),
             equalTo(true)
         );
-        assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID());
+        assertEquals(beforeStateUuid, client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).clear().get().getState().stateUUID());
 
         logger.info("--> delete repository test-repo-1");
         client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1").get();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java
index 1a54df1f85ed..8a0b74242ba9 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java
@@ -22,6 +22,7 @@
 
 import static org.elasticsearch.health.HealthStatus.GREEN;
 import static org.elasticsearch.health.HealthStatus.YELLOW;
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.getRepositoryDataBlobName;
 import static org.elasticsearch.snapshots.RepositoryIntegrityHealthIndicatorService.NAME;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -73,6 +74,6 @@ private void assertSnapshotRepositoryHealth(String message, Client client, Healt
 
     private void corruptRepository(String name, Path location) throws IOException {
         final RepositoryData repositoryData = getRepositoryData(name);
-        Files.delete(location.resolve("index-" + repositoryData.getGenId()));
+        Files.delete(location.resolve(getRepositoryDataBlobName(repositoryData.getGenId())));
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java
index 7626e59cd1b9..725fcbc1a584 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java
@@ -224,7 +224,7 @@ public void testRestoreIncreasesPrimaryTerms() {
             }
         }
 
-        final IndexMetadata indexMetadata = clusterAdmin().prepareState()
+        final IndexMetadata indexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
             .clear()
             .setIndices(indexName)
             .setMetadata(true)
@@ -251,7 +251,7 @@ public void testRestoreIncreasesPrimaryTerms() {
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(numPrimaries));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
 
-        final IndexMetadata restoredIndexMetadata = clusterAdmin().prepareState()
+        final IndexMetadata restoredIndexMetadata = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
             .clear()
             .setIndices(indexName)
             .setMetadata(true)
@@ -307,7 +307,13 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception {
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
 
         logger.info("--> assert that old mapping is restored");
-        MappingMetadata mappings = clusterAdmin().prepareState().get().getState().getMetadata().getIndices().get("test-idx").mapping();
+        MappingMetadata mappings = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
+            .get()
+            .getState()
+            .getMetadata()
+            .getIndices()
+            .get("test-idx")
+            .mapping();
         assertThat(mappings.sourceAsMap().toString(), containsString("baz"));
         assertThat(mappings.sourceAsMap().toString(), not(containsString("foo")));
 
@@ -818,7 +824,14 @@ public void testRecreateBlocksOnRestore() throws Exception {
                 .get();
             assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
 
-            ClusterBlocks blocks = client.admin().cluster().prepareState().clear().setBlocks(true).get().getState().blocks();
+            ClusterBlocks blocks = client.admin()
+                .cluster()
+                .prepareState(TEST_REQUEST_TIMEOUT)
+                .clear()
+                .setBlocks(true)
+                .get()
+                .getState()
+                .blocks();
             // compute current index settings (as we cannot query them if they contain SETTING_BLOCKS_METADATA)
             Settings mergedSettings = Settings.builder().put(initialSettings).put(changedSettings).build();
             logger.info("--> merged block settings {}", mergedSettings);
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index 531e9f4f45af..08daeaaec016 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -86,7 +86,9 @@
 import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY;
 import static org.elasticsearch.index.shard.IndexShardTests.getEngineFromShard;
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.METADATA_NAME_FORMAT;
 import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY;
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.SNAPSHOT_NAME_FORMAT;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
@@ -226,7 +228,7 @@ public void testBasicWorkFlow() throws Exception {
         ensureGreen();
         assertDocCount("test-idx-1", 100);
 
-        ClusterState clusterState = clusterAdmin().prepareState().get().getState();
+        ClusterState clusterState = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
         assertThat(clusterState.getMetadata().hasIndex("test-idx-1"), equalTo(true));
         assertThat(clusterState.getMetadata().hasIndex("test-idx-2"), equalTo(false));
 
@@ -509,7 +511,7 @@ public void testDataFileFailureDuringRestore() throws Exception {
         // same node again during the same reroute operation. Then another reroute
         // operation is scheduled, but the RestoreInProgressAllocationDecider will
         // block the shard to be assigned again because it failed during restore.
-        final ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().get();
+        final ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get();
         assertEquals(1, clusterStateResponse.getState().getNodes().getDataNodes().size());
         assertEquals(
             restoreInfo.failedShards(),
@@ -661,7 +663,10 @@ private void unrestorableUseCase(
         assertThat(restoreResponse.getRestoreInfo().totalShards(), equalTo(numShards.numPrimaries));
         assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(0));
 
-        ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState().setCustoms(true).setRoutingTable(true).get();
+        ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
+            .setCustoms(true)
+            .setRoutingTable(true)
+            .get();
 
         // check that there is no restore in progress
         RestoreInProgress restoreInProgress = clusterStateResponse.getState().custom(RestoreInProgress.TYPE);
@@ -865,7 +870,7 @@ public void testSnapshotClosedIndex() throws Exception {
         ensureGreen();
 
         logger.info("--> closing index test-idx-closed");
         assertAcked(client.admin().indices().prepareClose("test-idx-closed"));
-        ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
+        ClusterStateResponse stateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get();
         assertThat(stateResponse.getState().metadata().index("test-idx-closed").getState(), equalTo(IndexMetadata.State.CLOSE));
         assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), notNullValue());
 
@@ -1259,7 +1264,7 @@ public void testSnapshotRelocatingPrimary() throws Exception {
 
         logger.info("--> wait for relocations to start");
         assertBusy(
-            () -> assertThat(clusterAdmin().prepareHealth("test-idx").get().getRelocatingShards(), greaterThan(0)),
+            () -> assertThat(clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, "test-idx").get().getRelocatingShards(), greaterThan(0)),
             1L,
             TimeUnit.MINUTES
         );
@@ -1578,7 +1583,7 @@ public void testListCorruptedSnapshot() throws Exception {
         final SnapshotInfo snapshotInfo = createSnapshot("test-repo", "test-snap-2", Collections.singletonList("test-idx-*"));
 
         logger.info("--> truncate snapshot file to make it unreadable");
-        Path snapshotPath = repo.resolve("snap-" + snapshotInfo.snapshotId().getUUID() + ".dat");
+        Path snapshotPath = repo.resolve(Strings.format(SNAPSHOT_NAME_FORMAT, snapshotInfo.snapshotId().getUUID()));
         try (SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) {
             outChan.truncate(randomInt(10));
         }
@@ -1621,7 +1626,7 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception {
         final String snapshotName = "test-snap";
         final SnapshotInfo snapshotInfo = createFullSnapshot(repoName, snapshotName);
 
-        final Path globalStatePath = repo.resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat");
+        final Path globalStatePath = repo.resolve(Strings.format(METADATA_NAME_FORMAT, snapshotInfo.snapshotId().getUUID()));
         try (SeekableByteChannel outChan = Files.newByteChannel(globalStatePath, StandardOpenOption.WRITE)) {
             outChan.truncate(randomInt(10));
         }
@@ -1701,7 +1706,10 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception {
         final Path indexMetadataPath = repo.resolve("indices")
             .resolve(corruptedIndex.getId())
             .resolve(
-                "meta-" + repositoryData.indexMetaDataGenerations().indexMetaBlobId(snapshotInfo.snapshotId(), corruptedIndex) + ".dat"
+                Strings.format(
+                    METADATA_NAME_FORMAT,
+                    repositoryData.indexMetaDataGenerations().indexMetaBlobId(snapshotInfo.snapshotId(), corruptedIndex)
+                )
             );
 
         // Truncate the index metadata file
@@ -2180,7 +2188,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception {
                 equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())
             );
             assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(normalIndex, hiddenIndex, dottedHiddenIndex));
-            ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+            ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
             assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(true));
             assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(true));
             assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(true));
@@ -2200,7 +2208,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception {
                 equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())
            );
            assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(normalIndex, hiddenIndex));
-            ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+            ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
             assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(true));
             assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(true));
             assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(false));
@@ -2220,7 +2228,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception {
                 equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())
             );
             assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(hiddenIndex));
-            ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+            ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
             assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(false));
             assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(true));
             assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(false));
@@ -2240,7 +2248,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception {
                 equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())
             );
             assertThat(restoreSnapshotResponse.getRestoreInfo().indices(), containsInAnyOrder(dottedHiddenIndex));
-            ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+            ClusterState clusterState = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState();
             assertThat(clusterState.getMetadata().hasIndex(normalIndex), equalTo(false));
             assertThat(clusterState.getMetadata().hasIndex(hiddenIndex), equalTo(false));
             assertThat(clusterState.getMetadata().hasIndex(dottedHiddenIndex), equalTo(true));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java
index 6c91db0ad722..92e0b437cbeb 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java
@@ -36,14 +36,14 @@ public void testExceptionWhenRestoringPersistentSettings() {
         Client client = client();
 
         Consumer<String> setSettingValue = value -> client.admin()
             .cluster()
-            .prepareUpdateSettings()
+            .prepareUpdateSettings(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
             .setPersistentSettings(Settings.builder().put(BrokenSettingPlugin.BROKEN_SETTING.getKey(), value))
             .get();
 
         Consumer<String> assertSettingValue = value -> assertThat(
             client.admin()
                 .cluster()
-                .prepareState()
+                .prepareState(TEST_REQUEST_TIMEOUT)
                 .setRoutingTable(false)
                 .setNodes(false)
                 .get()
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java
index 8f2702099c10..a3f1f0038a03 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java
@@ -18,15 +18,12 @@
 import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
 import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction;
 import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
-import org.elasticsearch.action.ingest.DeletePipelineRequest;
 import org.elasticsearch.action.ingest.GetPipelineResponse;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.ingest.IngestTestPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.MockScriptEngine;
 import org.elasticsearch.script.StoredScriptsIT;
 import org.elasticsearch.xcontent.XContentFactory;
-import org.elasticsearch.xcontent.XContentType;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -36,7 +33,6 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateExists;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateMissing;
-import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 
@@ -84,18 +80,16 @@ public void testIncludeGlobalState() throws Exception {
 
         if (testPipeline) {
             logger.info("--> creating test pipeline");
-            BytesReference pipelineSource = BytesReference.bytes(
-                jsonBuilder().startObject()
-                    .field("description", "my_pipeline")
+            putJsonPipeline(
+                "barbaz",
+                (builder, params) -> builder.field("description", "my_pipeline")
                     .startArray("processors")
                    .startObject()
                    .startObject("test")
                    .endObject()
                    .endObject()
                    .endArray()
-                    .endObject()
             );
-            assertAcked(clusterAdmin().preparePutPipeline("barbaz", pipelineSource, XContentType.JSON).get());
         }
 
         if (testScript) {
@@ -144,7 +138,7 @@ public void testIncludeGlobalState() throws Exception {
 
         if (testPipeline) {
             logger.info("--> delete test pipeline");
-            assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("barbaz")).get());
+            deletePipeline("barbaz");
         }
 
         if (testScript) {
@@ -184,7 +178,7 @@ public void testIncludeGlobalState() throws Exception {
 
         if (testPipeline) {
             logger.info("--> check that pipeline is restored");
-            GetPipelineResponse getPipelineResponse = clusterAdmin().prepareGetPipeline("barbaz").get();
+            GetPipelineResponse getPipelineResponse = getPipelines("barbaz");
             assertTrue(getPipelineResponse.isFound());
         }
 
@@ -218,7 +212,7 @@ public void testIncludeGlobalState() throws Exception {
             cluster().wipeTemplates("test-template");
         }
         if (testPipeline) {
-            assertAcked(clusterAdmin().deletePipeline(new DeletePipelineRequest("barbaz")).get());
+            deletePipeline("barbaz");
         }
 
         if (testScript) {
@@ -245,7 +239,7 @@ public void testIncludeGlobalState() throws Exception {
         logger.info("--> check that global state wasn't restored but index was");
         getIndexTemplatesResponse = indicesAdmin().prepareGetTemplates().get();
         assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
-        assertFalse(clusterAdmin().prepareGetPipeline("barbaz").get().isFound());
+        assertFalse(getPipelines("barbaz").isFound());
         assertNull(safeExecute(GetStoredScriptAction.INSTANCE, new GetStoredScriptRequest(TEST_REQUEST_TIMEOUT, "foobar")).getSource());
         assertDocCount("test-idx", 100L);
     }
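
The SnapshotCustomPluginStateIT hunks above replace hand-built pipeline requests with three test helpers: putJsonPipeline (pipeline id plus an XContent body builder), getPipelines, and deletePipeline. A hedged sketch of the resulting round trip; the helper names and lambda shape are taken from the diff, and the method below assumes it lives in the integration-test base class that provides them:

    // Assumes an ESIntegTestCase-style class where putJsonPipeline,
    // getPipelines and deletePipeline are inherited helpers.
    void pipelineRoundTrip() throws Exception {
        putJsonPipeline(
            "my-pipeline",                         // hypothetical pipeline id
            (builder, params) -> builder.field("description", "example")
                .startArray("processors")
                .endArray()                        // helper appears to wrap the enclosing object itself
        );
        assertTrue(getPipelines("my-pipeline").isFound());
        deletePipeline("my-pipeline");
    }
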
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java
index 2d1e16dc6427..3d16293c1462 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java
@@ -177,7 +177,12 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception {
             SubscribableListener.newForked(
                 l -> client().execute(
                     TransportAddVotingConfigExclusionsAction.TYPE,
-                    new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, new String[] { masterName }, TimeValue.timeValueSeconds(10)),
+                    new AddVotingConfigExclusionsRequest(
+                        TEST_REQUEST_TIMEOUT,
+                        Strings.EMPTY_ARRAY,
+                        new String[] { masterName },
+                        TimeValue.timeValueSeconds(10)
+                    ),
                     l
                 )
             )
@@ -212,7 +217,7 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception {
 
         // flush master queue to ensure the completion is applied everywhere
         safeAwait(
             SubscribableListener.newForked(
-                l -> client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute(l)
+                l -> client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).execute(l)
             )
         );
 
@@ -230,7 +235,7 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception {
         }
 
         safeAwait(SubscribableListener.newForked(l -> {
-            final var clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest();
+            final var clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(TEST_REQUEST_TIMEOUT);
             clearVotingConfigExclusionsRequest.setWaitForRemoval(false);
             client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearVotingConfigExclusionsRequest, l);
         }));
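
The SnapshotShutdownIT hunks above lean on a recurring async idiom: SubscribableListener.newForked runs an action that consumes an ActionListener, and the test-framework helper safeAwait blocks until it completes. A hedged sketch of that shape, mirroring the diff; client(), safeAwait and TEST_REQUEST_TIMEOUT are assumed to come from the ESIntegTestCase scaffolding:

    // Assumes an ESIntegTestCase subclass providing client(), safeAwait and
    // TEST_REQUEST_TIMEOUT; Priority is org.elasticsearch.common.Priority.
    import org.elasticsearch.action.support.SubscribableListener;

    void flushMasterQueue() {
        safeAwait(
            SubscribableListener.newForked(
                // The forked action completes the listener l when the health
                // request (with its explicit master-node timeout) returns.
                l -> client().admin().cluster().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForEvents(Priority.LANGUID).execute(l)
            )
        );
    }
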
current cluster state:\n{}", + Strings.toString(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), true, true) + ); } } @@ -381,7 +392,7 @@ private void acquirePermitsAtEnd( logger.warn("--> failed to acquire permit [{}]", label); logger.info( "--> current cluster state:\n{}", - Strings.toString(clusterAdmin().prepareState().get().getState(), true, true) + Strings.toString(clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(), true, true) ); HotThreads.logLocalHotThreads( logger, @@ -1604,7 +1615,7 @@ Releasable tryAcquirePartialSnapshottingPermit() { // Prepares a health request with twice the default (30s) timeout that waits for all cluster tasks to finish as well as all cluster // nodes before returning private static ClusterHealthRequestBuilder prepareClusterHealthRequest(String... targetIndexNames) { - return clusterAdmin().prepareHealth(targetIndexNames) + return clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT, targetIndexNames) .setTimeout(TimeValue.timeValueSeconds(60)) .setWaitForNodes(Integer.toString(internalCluster().getNodeNames().length)) .setWaitForEvents(Priority.LANGUID); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index 706ceaad7905..32f76bdcc61e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -704,7 +704,11 @@ public void testPartialSnapshotsOfSystemIndexRemovesFeatureState() throws Except // Stop a random data node so we lose a shard from the partial index internalCluster().stopRandomDataNode(); - assertBusy(() -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth().get().getStatus()), 30, TimeUnit.SECONDS); + assertBusy( + () -> assertEquals(ClusterHealthStatus.RED, clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).get().getStatus()), + 30, + TimeUnit.SECONDS + ); // Get ready to block blockMasterFromFinalizingSnapshotOnIndexFile(REPO_NAME); diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index d7db8f4ec09d..c7f3e25b6a96 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1646,12 +1646,7 @@ private enum ElasticsearchExceptionHandle { UNKNOWN_VERSION_ADDED ), // 127 used to be org.elasticsearch.search.SearchContextException - SEARCH_SOURCE_BUILDER_EXCEPTION( - org.elasticsearch.search.builder.SearchSourceBuilderException.class, - org.elasticsearch.search.builder.SearchSourceBuilderException::new, - 128, - UNKNOWN_VERSION_ADDED - ), + // 128 used to be org.elasticsearch.search.builder.SearchSourceBuilderException // 129 was EngineClosedException NO_SHARD_AVAILABLE_ACTION_EXCEPTION( org.elasticsearch.action.NoShardAvailableActionException.class, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6640b8b5eac8..1699efb02dd1 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -206,6 +206,9 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_ADD_INDEX_MODE_CONCRETE_INDICES = def(8_736_00_0); public 
static final TransportVersion UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH = def(8_737_00_0); public static final TransportVersion ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS = def(8_738_00_0); + public static final TransportVersion CCS_TELEMETRY_STATS = def(8_739_00_0); + public static final TransportVersion GLOBAL_RETENTION_TELEMETRY = def(8_740_00_0); + public static final TransportVersion ROUTING_TABLE_VERSION_REMOVED = def(8_741_00_0); /* * STOP! READ THIS FIRST! No, really, @@ -264,6 +267,8 @@ static TransportVersion def(int id) { * Reference to the earliest compatible transport version to this version of the codebase. * This should be the transport version used by the highest minor version of the previous major. */ + @UpdateForV9 + // This needs to be bumped to the 8.last public static final TransportVersion MINIMUM_COMPATIBLE = V_7_17_0; /** diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 54b6b1ef9c8c..1f3ab7388927 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -13,8 +13,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Assertions; -import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -124,6 +124,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_7_17_22 = new Version(7_17_22_99); public static final Version V_7_17_23 = new Version(7_17_23_99); public static final Version V_7_17_24 = new Version(7_17_24_99); + public static final Version V_7_17_25 = new Version(7_17_25_99); public static final Version V_8_0_0 = new Version(8_00_00_99); public static final Version V_8_0_1 = new Version(8_00_01_99); @@ -184,7 +185,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_15_1 = new Version(8_15_01_99); public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version V_8_16_0 = new Version(8_16_00_99); - public static final Version CURRENT = V_8_16_0; + public static final Version V_9_0_0 = new Version(9_00_00_99); + public static final Version CURRENT = V_9_0_0; private static final NavigableMap VERSION_IDS; private static final Map VERSION_STRINGS; @@ -220,14 +222,7 @@ public class Version implements VersionId, ToXContentFragment { } } } - assert RestApiVersion.current().major == CURRENT.major && RestApiVersion.previous().major == CURRENT.major - 1 - : "RestApiVersion must be upgraded " - + "to reflect major from Version.CURRENT [" - + CURRENT.major - + "]" - + " but is still set to [" - + RestApiVersion.current().major - + "]"; + assertRestApiVersion(); builder.put(V_EMPTY_ID, V_EMPTY); builderByString.put(V_EMPTY.toString(), V_EMPTY); @@ -235,6 +230,19 @@ public class Version implements VersionId, ToXContentFragment { VERSION_STRINGS = Map.copyOf(builderByString); } + @UpdateForV9 + // Re-enable this assertion once the rest api version is bumped + private static void assertRestApiVersion() { + // assert RestApiVersion.current().major == CURRENT.major && RestApiVersion.previous().major == CURRENT.major - 1 + // : "RestApiVersion must be upgraded " + // + "to reflect major from 
Version.CURRENT [" + // + CURRENT.major + // + "]" + // + " but is still set to [" + // + RestApiVersion.current().major + // + "]"; + } + public static Version readVersion(StreamInput in) throws IOException { return fromId(in.readVInt()); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index 82e4e4123e4f..139ef58e4292 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -40,24 +40,28 @@ public class AddVotingConfigExclusionsRequest extends MasterNodeRequest { - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public Request(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java index 11bdd41f458d..f71def58820a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java @@ -9,7 +9,6 @@ package org.elasticsearch.action.admin.cluster.desirednodes; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; @@ -27,6 +26,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -102,17 +102,12 @@ public ClusterState afterBatchExecution(ClusterState clusterState, boolean clust } public static class Request extends AcknowledgedRequest { - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } public Request(StreamInput in) throws IOException { super(in); } - - @Override - public ActionRequestValidationException validate() { - return null; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index 3d8cdb4b405f..550db4892a67 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.features.NodeFeature; import 
org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -47,8 +48,15 @@ public class UpdateDesiredNodesRequest extends AcknowledgedRequest DesiredNode.fromXContent(p), NODES_FIELD); } - public UpdateDesiredNodesRequest(String historyID, long version, List nodes, boolean dryRun) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public UpdateDesiredNodesRequest( + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String historyID, + long version, + List nodes, + boolean dryRun + ) { + super(masterNodeTimeout, ackTimeout); assert historyID != null; assert nodes != null; this.historyID = historyID; @@ -80,10 +88,16 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static UpdateDesiredNodesRequest fromXContent(String historyID, long version, boolean dryRun, XContentParser parser) - throws IOException { + public static UpdateDesiredNodesRequest fromXContent( + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String historyID, + long version, + boolean dryRun, + XContentParser parser + ) throws IOException { List nodes = PARSER.parse(parser, null); - return new UpdateDesiredNodesRequest(historyID, version, nodes, dryRun); + return new UpdateDesiredNodesRequest(masterNodeTimeout, ackTimeout, historyID, version, nodes, dryRun); } public String getHistoryID() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 2b60e2d4a5ff..2344c8f99ced 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -37,12 +37,12 @@ public class ClusterHealthRequest extends MasterNodeReadRequest { - public ClusterHealthRequestBuilder(ElasticsearchClient client) { - super(client, TransportClusterHealthAction.TYPE, new ClusterHealthRequest()); + public ClusterHealthRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout) { + super(client, TransportClusterHealthAction.TYPE, new ClusterHealthRequest(masterNodeTimeout)); } public ClusterHealthRequestBuilder setIndices(String... 
indices) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java index ba2ad88917a9..dfb229c4d533 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -19,8 +20,8 @@ */ public class GetFeatureUpgradeStatusRequest extends MasterNodeRequest { - public GetFeatureUpgradeStatusRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetFeatureUpgradeStatusRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public GetFeatureUpgradeStatusRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java index 36a90ae9afe3..84321de66a33 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -19,8 +20,8 @@ */ public class PostFeatureUpgradeRequest extends MasterNodeRequest { - public PostFeatureUpgradeRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public PostFeatureUpgradeRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public PostFeatureUpgradeRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java index 5bde01195e35..1d70d25478b1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java @@ -33,8 +33,8 @@ public class PrevalidateNodeRemovalRequest extends MasterNodeReadRequest { - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public Request(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index c4e40f1b208b..bf874c3c3887 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -14,6 +14,7 @@ import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -35,10 +36,13 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest PARSER = new ObjectParser<>( + public interface Factory { + ClusterUpdateSettingsRequest create(); + } + + private static final ObjectParser PARSER = ObjectParser.fromBuilder( "cluster_update_settings_request", - false, - ClusterUpdateSettingsRequest::new + Factory::create ); static { @@ -55,8 +59,8 @@ public ClusterUpdateSettingsRequest(StreamInput in) throws IOException { persistentSettings = readSettingsFromStream(in); } - public ClusterUpdateSettingsRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public ClusterUpdateSettingsRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } @Override @@ -188,7 +192,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static ClusterUpdateSettingsRequest fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + public static ClusterUpdateSettingsRequest fromXContent(Factory factory, XContentParser parser) { + return PARSER.apply(parser, factory); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 31ab3223bbb4..a7682add8b09 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentType; import java.util.Map; @@ -23,8 +24,8 @@ public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuil ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> { - public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client) { - super(client, ClusterUpdateSettingsAction.INSTANCE, new ClusterUpdateSettingsRequest()); + public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(client, ClusterUpdateSettingsAction.INSTANCE, new ClusterUpdateSettingsRequest(masterNodeTimeout, ackTimeout)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java index 8990112a3057..d64de6e452e7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -38,8 +38,8 @@ public class ClusterStateRequest extends MasterNodeReadRequest { - public ClusterStateRequestBuilder(ElasticsearchClient client) { - super(client, ClusterStateAction.INSTANCE, new 
ClusterStateRequest()); + public ClusterStateRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout) { + super(client, ClusterStateAction.INSTANCE, new ClusterStateRequest(masterNodeTimeout)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java index fe1da86dd54c..68fd4c2a1529 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSTelemetrySnapshot.java @@ -277,7 +277,7 @@ public int hashCode() { */ public void add(CCSTelemetrySnapshot stats) { // This should be called in ClusterStatsResponse ctor only, so we don't need to worry about concurrency - if (stats.totalCount == 0) { + if (stats == null || stats.totalCount == 0) { // Just ignore the empty stats. // This could happen if the node is brand new or if the stats are not available, e.g. because it runs an old version. return; @@ -315,7 +315,7 @@ public void add(CCSTelemetrySnapshot stats) { * "p90": 2570 * } */ - public static void publishLatency(XContentBuilder builder, String name, LongMetricValue took) throws IOException { + private static void publishLatency(XContentBuilder builder, String name, LongMetricValue took) throws IOException { builder.startObject(name); { builder.field("max", took.max()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java index 60766bd4068e..6016378aa886 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/CCSUsageTelemetry.java @@ -175,7 +175,7 @@ public static class PerClusterCCSTelemetry { // The number of successful (not skipped) requests to this cluster. private final LongAdder count; private final LongAdder skippedCount; - // This is only over the successful requetss, skipped ones do not count here. + // This is only over the successful requests, skipped ones do not count here. 
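The `PerClusterCCSTelemetry` fields above pair `LongAdder` counters with a latency metric that, per the corrected comment, only covers successful requests. A minimal sketch of that accumulation pattern using only JDK types; the names `PerClusterStats`, `CcsTelemetry`, `onSuccess`, and `onSkipped` are illustrative stand-ins, not the real Elasticsearch API:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAccumulator;
import java.util.concurrent.atomic.LongAdder;

// Sketch of per-cluster CCS telemetry accumulation. LongAdder gives
// contention-friendly counters; latency is recorded only for successful
// (non-skipped) requests, mirroring the comment in the diff above.
final class PerClusterStats {
    private final LongAdder count = new LongAdder();
    private final LongAdder skippedCount = new LongAdder();
    private final LongAccumulator maxTookMillis = new LongAccumulator(Long::max, 0L);

    void onSuccess(long tookMillis) {
        count.increment();
        maxTookMillis.accumulate(tookMillis); // successful requests only
    }

    void onSkipped() {
        skippedCount.increment(); // skipped requests contribute no latency
    }

    long successes() { return count.sum(); }
    long skipped() { return skippedCount.sum(); }
    long maxTook() { return maxTookMillis.get(); }
}

final class CcsTelemetry {
    // One stats object per remote cluster alias, created on first use.
    private final Map<String, PerClusterStats> byCluster = new ConcurrentHashMap<>();

    PerClusterStats forCluster(String clusterAlias) {
        return byCluster.computeIfAbsent(clusterAlias, alias -> new PerClusterStats());
    }
}
```

The real class likely prefers `LongAdder` over a plain `AtomicLong` for the same reason as this sketch: telemetry is written from many search threads at once, and reads only happen when a stats snapshot is taken.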
private final LongMetric took; PerClusterCCSTelemetry(String clusterAlias) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index b48295dc8b3e..732eb2ec2dcc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -30,6 +30,7 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; private final RepositoryUsageStats repositoryUsageStats; + private final CCSTelemetrySnapshot ccsMetrics; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); @@ -47,6 +48,11 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } else { repositoryUsageStats = RepositoryUsageStats.EMPTY; } + if (in.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { + ccsMetrics = new CCSTelemetrySnapshot(in); + } else { + ccsMetrics = new CCSTelemetrySnapshot(); + } } public ClusterStatsNodeResponse( @@ -56,7 +62,8 @@ public ClusterStatsNodeResponse( NodeStats nodeStats, ShardStats[] shardsStats, SearchUsageStats searchUsageStats, - RepositoryUsageStats repositoryUsageStats + RepositoryUsageStats repositoryUsageStats, + CCSTelemetrySnapshot ccsTelemetrySnapshot ) { super(node); this.nodeInfo = nodeInfo; @@ -65,6 +72,7 @@ public ClusterStatsNodeResponse( this.clusterStatus = clusterStatus; this.searchUsageStats = Objects.requireNonNull(searchUsageStats); this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); + this.ccsMetrics = ccsTelemetrySnapshot; } public NodeInfo nodeInfo() { @@ -95,6 +103,10 @@ public RepositoryUsageStats repositoryUsageStats() { return repositoryUsageStats; } + public CCSTelemetrySnapshot getCcsMetrics() { + return ccsMetrics; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -108,5 +120,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { repositoryUsageStats.writeTo(out); } // else just drop these stats, ok for bwc + if (out.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { + ccsMetrics.writeTo(out); + } } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index b6dd40e8c8b7..267db92496f7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.Locale; +import static org.elasticsearch.action.search.TransportSearchAction.CCS_TELEMETRY_FEATURE_FLAG; + public class ClusterStatsResponse extends BaseNodesResponse implements ToXContentFragment { final ClusterStatsNodes nodesStats; @@ -31,6 +33,8 @@ public class ClusterStatsResponse extends BaseNodesResponse ccsMetrics.add(node.getCcsMetrics())); this.status = status; this.clusterSnapshotStats = clusterSnapshotStats; @@ -90,6 +96,10 @@ public ClusterStatsIndices getIndicesStats() { return indicesStats; } + public CCSTelemetrySnapshot 
getCcsMetrics() { + return ccsMetrics; + } + @Override public void writeTo(StreamOutput out) throws IOException { TransportAction.localOnly(); @@ -125,6 +135,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("repositories"); repositoryUsageStats.toXContent(builder, params); + if (CCS_TELEMETRY_FEATURE_FLAG.isEnabled()) { + builder.startObject("ccs"); + ccsMetrics.toXContent(builder, params); + builder.endObject(); + } + return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 1912de3cfa4d..66cf627ce066 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -81,6 +81,7 @@ public class TransportClusterStatsAction extends TransportNodesAction< private final IndicesService indicesService; private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; + private final CCSUsageTelemetry ccsUsageHolder; private final MetadataStatsCache mappingStatsCache; private final MetadataStatsCache analysisStatsCache; @@ -108,6 +109,7 @@ public TransportClusterStatsAction( this.indicesService = indicesService; this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); + this.ccsUsageHolder = usageService.getCcsUsageHolder(); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); } @@ -249,6 +251,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq final SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); + final CCSTelemetrySnapshot ccsTelemetry = ccsUsageHolder.getCCSTelemetrySnapshot(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -257,7 +260,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]), searchUsageStats, - repositoryUsageStats + repositoryUsageStats, + ccsTelemetry ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java index b27a8d0aacb7..976948dc722f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java @@ -11,13 +11,14 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; public class PendingClusterTasksRequest extends MasterNodeReadRequest { - public PendingClusterTasksRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public PendingClusterTasksRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public PendingClusterTasksRequest(StreamInput in) throws 
IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 5d1b7264ebf8..152cd6a34196 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -67,8 +67,9 @@ public class CreateIndexRequest extends AcknowledgedRequest private boolean initializeFailureStore; private Settings settings = Settings.EMPTY; + public static final String EMPTY_MAPPINGS = "{}"; - private String mappings = "{}"; + private String mappings = EMPTY_MAPPINGS; private final Set aliases = new HashSet<>(); @@ -284,8 +285,11 @@ private CreateIndexRequest mapping(BytesReference source, XContentType xContentT } private CreateIndexRequest mapping(String type, Map source) { - // wrap it in a type map if its not - if (source.size() != 1 || source.containsKey(type) == false) { + if (source.isEmpty()) { + // If no source is provided, we return empty mappings + return mapping(EMPTY_MAPPINGS); + } else if (source.size() != 1 || source.containsKey(type) == false) { + // wrap it in a type map if it's not source = Map.of(MapperService.SINGLE_MAPPING_NAME, source); } else if (MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { // if it has a different type name, then unwrap and rewrap with _doc diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index b8d975f82980..928948059df9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -660,7 +660,7 @@ static void validate( } if ((request.settings().equals(Settings.EMPTY) == false) || (request.aliases().size() > 0) - || (request.mappings().equals("{}") == false)) { + || (request.mappings().equals(CreateIndexRequest.EMPTY_MAPPINGS) == false)) { throw new IllegalArgumentException( "aliases, mappings, and index settings may not be specified when rolling over a data stream" ); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 813203afe42c..64e0b80aca74 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -44,6 +44,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.node.NodeClosedException; @@ -478,7 +479,8 @@ private void completeShardOperation() { } private void processFailure(BulkItemRequest bulkItemRequest, Exception cause) { - var errorType = ElasticsearchException.getExceptionName(ExceptionsHelper.unwrapCause(cause)); + var error = ExceptionsHelper.unwrapCause(cause); + var errorType = ElasticsearchException.getExceptionName(error); DocWriteRequest docWriteRequest = bulkItemRequest.request(); DataStream failureStoreCandidate = getRedirectTargetCandidate(docWriteRequest,
getClusterState().metadata()); // If the candidate is not null, the BulkItemRequest targets a data stream, but we'll still have to check if @@ -486,7 +488,9 @@ private void processFailure(BulkItemRequest bulkItemRequest, Exception cause) { if (failureStoreCandidate != null) { // Do not redirect documents to a failure store that were already headed to one. var isFailureStoreDoc = docWriteRequest instanceof IndexRequest indexRequest && indexRequest.isWriteToFailureStore(); - if (isFailureStoreDoc == false && failureStoreCandidate.isFailureStoreEnabled()) { + if (isFailureStoreDoc == false + && failureStoreCandidate.isFailureStoreEnabled() + && error instanceof VersionConflictEngineException == false) { // Redirect to failure store. maybeMarkFailureStoreForRollover(failureStoreCandidate); addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreCandidate.getName()); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java index 7c788b10405f..30c6699ac902 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,11 +48,6 @@ public Request(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name, l this.startTime = startTime; } - @Deprecated(forRemoval = true) // temporary compatibility shim - public Request(String name) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, name); - } - public String getName() { return name; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java index 1a62e347012f..d95a8bbc4b46 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/DeleteDataStreamAction.java @@ -53,11 +53,6 @@ public Request(TimeValue masterNodeTimeout, String... names) { this.wildcardExpressionsOriginallySpecified = Arrays.stream(names).anyMatch(Regex::isSimpleMatchPattern); } - @Deprecated(forRemoval = true) // temporary compatibility shim - public Request(String... 
names) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, names); - } - public String[] getNames() { return names; } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 2bcd824dfea3..2f9ba9220fbc 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.DataStream; @@ -71,11 +70,6 @@ public Request(TimeValue masterNodeTimeout, String[] names, boolean includeDefau this.includeDefaults = includeDefaults; } - @Deprecated(forRemoval = true) // temporary compatibility shim - public Request(String[] names) { - this(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, names); - } - public String[] getNames() { return names; } diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index 7d2b1be79731..8ccc190a0444 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -45,20 +45,21 @@ public static class Request extends MasterNodeRequest implements Indice private DownsampleConfig downsampleConfig; public Request( + TimeValue masterNodeTimeout, final String sourceIndex, final String targetIndex, final TimeValue waitTimeout, final DownsampleConfig downsampleConfig ) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + super(masterNodeTimeout); this.sourceIndex = sourceIndex; this.targetIndex = targetIndex; this.waitTimeout = waitTimeout == null ? DEFAULT_WAIT_TIMEOUT : waitTimeout; this.downsampleConfig = downsampleConfig; } - public Request() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public Request(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public Request(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java deleted file mode 100644 index ef08f64765f9..000000000000 --- a/server/src/main/java/org/elasticsearch/action/ingest/DeletePipelineRequestBuilder.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class DeletePipelineRequestBuilder extends ActionRequestBuilder { - - public DeletePipelineRequestBuilder(ElasticsearchClient client, String id) { - super(client, DeletePipelineTransportAction.TYPE, new DeletePipelineRequest(id)); - } - - /** - * Sets the id of the pipeline to delete. - */ - public DeletePipelineRequestBuilder setId(String id) { - request.setId(id); - return this; - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java deleted file mode 100644 index ca873c5aa384..000000000000 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineRequestBuilder.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; - -public class GetPipelineRequestBuilder extends MasterNodeReadOperationRequestBuilder< - GetPipelineRequest, - GetPipelineResponse, - GetPipelineRequestBuilder> { - - public GetPipelineRequestBuilder(ElasticsearchClient client, String[] ids) { - super(client, GetPipelineAction.INSTANCE, new GetPipelineRequest(ids)); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java deleted file mode 100644 index 2fce285d83f0..000000000000 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequestBuilder.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.xcontent.XContentType; - -public class PutPipelineRequestBuilder extends ActionRequestBuilder { - - public PutPipelineRequestBuilder(ElasticsearchClient client, String id, BytesReference source, XContentType xContentType) { - super(client, PutPipelineTransportAction.TYPE, new PutPipelineRequest(id, source, xContentType)); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java index aca9bb81fb53..ba0c06db968e 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java @@ -100,7 +100,7 @@ public TransformState transform(Object source, TransformState prevState) throws toDelete.removeAll(entities); for (var pipelineToDelete : toDelete) { - var task = new IngestService.DeletePipelineClusterStateUpdateTask(pipelineToDelete); + var task = new IngestService.DeletePipelineClusterStateUpdateTask(null, new DeletePipelineRequest(pipelineToDelete)); state = wrapIngestTaskExecute(task, state); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 23ff692da488..30faae9c1a5f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -127,7 +127,7 @@ public class TransportSearchAction extends HandledTransportAction SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 4d5a670925b5..95dd1ccdf86d 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -94,16 +94,6 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder; -import org.elasticsearch.action.ingest.DeletePipelineTransportAction; -import org.elasticsearch.action.ingest.GetPipelineAction; -import org.elasticsearch.action.ingest.GetPipelineRequest; -import org.elasticsearch.action.ingest.GetPipelineRequestBuilder; -import org.elasticsearch.action.ingest.GetPipelineResponse; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineRequestBuilder; -import org.elasticsearch.action.ingest.PutPipelineTransportAction; import org.elasticsearch.action.ingest.SimulatePipelineAction; import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; @@ -158,8 +148,8 @@ public void health(final ClusterHealthRequest request, final 
ActionListener state(final ClusterStateRequest request) { @@ -170,8 +160,8 @@ public void state(final ClusterStateRequest request, final ActionListener updateSettings(final ClusterUpdateSettingsRequest request) { @@ -182,8 +172,8 @@ public void updateSettings(final ClusterUpdateSettingsRequest request, final Act execute(ClusterUpdateSettingsAction.INSTANCE, request, listener); } - public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { - return new ClusterUpdateSettingsRequestBuilder(this); + public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + return new ClusterUpdateSettingsRequestBuilder(this, masterNodeTimeout, ackTimeout); } public ActionFuture nodesInfo(final NodesInfoRequest request) { @@ -370,38 +360,6 @@ public SnapshotsStatusRequestBuilder prepareSnapshotStatus(TimeValue masterNodeT return new SnapshotsStatusRequestBuilder(this, masterNodeTimeout); } - public void putPipeline(PutPipelineRequest request, ActionListener listener) { - execute(PutPipelineTransportAction.TYPE, request, listener); - } - - public ActionFuture putPipeline(PutPipelineRequest request) { - return execute(PutPipelineTransportAction.TYPE, request); - } - - public PutPipelineRequestBuilder preparePutPipeline(String id, BytesReference source, XContentType xContentType) { - return new PutPipelineRequestBuilder(this, id, source, xContentType); - } - - public void deletePipeline(DeletePipelineRequest request, ActionListener listener) { - execute(DeletePipelineTransportAction.TYPE, request, listener); - } - - public ActionFuture deletePipeline(DeletePipelineRequest request) { - return execute(DeletePipelineTransportAction.TYPE, request); - } - - public DeletePipelineRequestBuilder prepareDeletePipeline(String id) { - return new DeletePipelineRequestBuilder(this, id); - } - - public void getPipeline(GetPipelineRequest request, ActionListener listener) { - execute(GetPipelineAction.INSTANCE, request, listener); - } - - public GetPipelineRequestBuilder prepareGetPipeline(String... ids) { - return new GetPipelineRequestBuilder(this, ids); - } - public void simulatePipeline(SimulatePipelineRequest request, ActionListener listener) { execute(SimulatePipelineAction.INSTANCE, request, listener); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java new file mode 100644 index 000000000000..d647956e752a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStore.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Holds the data stream failure store metadata that enables or disables the failure store of a data stream. Currently, it + * supports the following configurations: + * - enabled + */ +public record DataStreamFailureStore(boolean enabled) implements SimpleDiffable, ToXContentObject { + + public static final ParseField ENABLED_FIELD = new ParseField("enabled"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "failure_store", + false, + (args, unused) -> new DataStreamFailureStore(args[0] == null || (Boolean) args[0]) + ); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED_FIELD); + } + + public DataStreamFailureStore() { + this(true); + } + + public DataStreamFailureStore(StreamInput in) throws IOException { + this(in.readBoolean()); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(DataStreamFailureStore::new, in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(enabled); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ENABLED_FIELD.getPreferredName(), enabled); + builder.endObject(); + return builder; + } + + public static DataStreamFailureStore fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java new file mode 100644 index 000000000000..9c7d2a986fa4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamOptions.java @@ -0,0 +1,93 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.SimpleDiffable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Holds data stream dedicated configuration options, such as the failure store and, in the future, lifecycle. Currently, it + * supports the following configurations: + * - failure store + */ +public record DataStreamOptions(@Nullable DataStreamFailureStore failureStore) + implements + SimpleDiffable, + ToXContentObject { + + public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "options", + false, + (args, unused) -> new DataStreamOptions((DataStreamFailureStore) args[0]) + ); + + static { + PARSER.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> DataStreamFailureStore.fromXContent(p), + FAILURE_STORE_FIELD, + ObjectParser.ValueType.OBJECT_OR_NULL + ); + } + + public DataStreamOptions() { + this(null); + } + + public static DataStreamOptions read(StreamInput in) throws IOException { + return new DataStreamOptions(in.readOptionalWriteable(DataStreamFailureStore::new)); + } + + @Nullable + public DataStreamFailureStore getFailureStore() { + return failureStore; + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return SimpleDiffable.readDiffFrom(DataStreamOptions::read, in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(failureStore); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (failureStore != null) { + builder.field(FAILURE_STORE_FIELD.getPreferredName(), failureStore); + } + builder.endObject(); + return builder; + } + + public static DataStreamOptions fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 459c6c6ec733..b945fe7e510f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -567,17 +567,14 @@ private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions // Exclude this one as it's a net-new system index, and we explicitly don't want those.
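Both new records above rely on `ConstructingObjectParser`, so a body like `{"failure_store": {"enabled": false}}` parses straight into `DataStreamOptions(DataStreamFailureStore)`. A minimal sketch of driving that parser from a JSON string, assuming the `JsonXContent` and `XContentParserConfiguration` helpers behave as in Elasticsearch's xcontent library (the wrapper class is hypothetical):

```java
import java.io.IOException;

import org.elasticsearch.cluster.metadata.DataStreamOptions;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.json.JsonXContent;

// Hypothetical helper: parse the new data stream options from a JSON string.
final class DataStreamOptionsJson {
    static DataStreamOptions parse(String json) throws IOException {
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
            return DataStreamOptions.fromXContent(parser);
        }
    }
}
```

An explicit `"failure_store": null` would also parse, since the field is declared with `ValueType.OBJECT_OR_NULL` and the optional constructor argument then stays `null`.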
return false; } - if (DataStream.isFailureStoreFeatureFlagEnabled()) { - IndexAbstraction indexAbstraction = context.getState().metadata().getIndicesLookup().get(index.getName()); - if (context.options.allowFailureIndices() == false) { - DataStream parentDataStream = indexAbstraction.getParentDataStream(); - if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { - if (parentDataStream.isFailureStoreIndex(index.getName())) { - if (options.ignoreUnavailable()) { - return false; - } else { - throw new FailureIndexNotSupportedException(index); - } + if (DataStream.isFailureStoreFeatureFlagEnabled() && context.options.allowFailureIndices() == false) { + DataStream parentDataStream = context.getState().metadata().getIndicesLookup().get(index.getName()).getParentDataStream(); + if (parentDataStream != null && parentDataStream.isFailureStoreEnabled()) { + if (parentDataStream.isFailureStoreIndex(index.getName())) { + if (options.ignoreUnavailable()) { + return false; + } else { + throw new FailureIndexNotSupportedException(index); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java index 13c2fabd6b3d..7f0840ca8bb2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java @@ -224,7 +224,7 @@ static void prepareBackingIndex( Settings nodeSettings ) throws IOException { MappingMetadata mm = im.mapping(); - if (mm == null) { + if (mm == null || mm.equals(MappingMetadata.EMPTY_MAPPINGS)) { throw new IllegalArgumentException("backing index [" + im.getIndex().getName() + "] must have mappings for a timestamp field"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 855793e9e978..92e7058bc79d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.routing; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; @@ -45,22 +46,15 @@ */ public class RoutingTable implements Iterable, Diffable { - public static final RoutingTable EMPTY_ROUTING_TABLE = new RoutingTable(0, ImmutableOpenMap.of()); - - private final long version; + public static final RoutingTable EMPTY_ROUTING_TABLE = new RoutingTable(ImmutableOpenMap.of()); // index to IndexRoutingTable map private final ImmutableOpenMap indicesRouting; - private RoutingTable(long version, ImmutableOpenMap indicesRouting) { - this.version = version; + private RoutingTable(ImmutableOpenMap indicesRouting) { this.indicesRouting = indicesRouting; } - public RoutingTable withIncrementedVersion() { - return new RoutingTable(version + 1, indicesRouting); - } - /** * Get's the {@link IndexShardRoutingTable} for the given shard id from the given {@link IndexRoutingTable} * or throws a {@link ShardNotFoundException} if no shard by the given id is found in the IndexRoutingTable. 
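The `RoutingTable` hunks that follow show the wire-compatibility idiom this change relies on in several places (the same idiom gates the new `CCS_TELEMETRY_STATS` field in `ClusterStatsNodeResponse`): a field is read or written only when the peer's transport version requires it. A condensed sketch of the pattern, with an illustrative wrapper class around the real `StreamInput`/`StreamOutput` calls taken from the diff:

```java
import java.io.IOException;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Illustrative sketch of transport-version-gated serialization. When talking
// to a node older than ROUTING_TABLE_VERSION_REMOVED we still emit the legacy
// long so that node's reader stays aligned; newer peers skip the field entirely.
final class LegacyVersionField {
    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
            out.writeLong(0); // old nodes never use the value, so any number will do
        }
    }

    void readFrom(StreamInput in) throws IOException {
        if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
            in.readLong(); // consume and discard the legacy field
        }
    }
}
```

New fields use the mirror-image check, `onOrAfter`, as `ClusterStatsNodeResponse` does with `CCS_TELEMETRY_STATS`; removed fields keep a `before` branch until the minimum compatible transport version passes the cutoff.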
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 855793e9e978..92e7058bc79d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -8,6 +8,7 @@
 
 package org.elasticsearch.cluster.routing;
 
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.Diffable;
 import org.elasticsearch.cluster.DiffableUtils;
@@ -45,22 +46,15 @@
  */
 public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
 
-    public static final RoutingTable EMPTY_ROUTING_TABLE = new RoutingTable(0, ImmutableOpenMap.of());
-
-    private final long version;
+    public static final RoutingTable EMPTY_ROUTING_TABLE = new RoutingTable(ImmutableOpenMap.of());
 
     // index to IndexRoutingTable map
     private final ImmutableOpenMap<String, IndexRoutingTable> indicesRouting;
 
-    private RoutingTable(long version, ImmutableOpenMap<String, IndexRoutingTable> indicesRouting) {
-        this.version = version;
+    private RoutingTable(ImmutableOpenMap<String, IndexRoutingTable> indicesRouting) {
         this.indicesRouting = indicesRouting;
     }
 
-    public RoutingTable withIncrementedVersion() {
-        return new RoutingTable(version + 1, indicesRouting);
-    }
-
     /**
      * Get's the {@link IndexShardRoutingTable} for the given shard id from the given {@link IndexRoutingTable}
      * or throws a {@link ShardNotFoundException} if no shard by the given id is found in the IndexRoutingTable.
@@ -77,15 +71,6 @@ public static IndexShardRoutingTable shardRoutingTable(IndexRoutingTable indexRo
         return indexShard;
     }
 
-    /**
-     * Returns the version of the {@link RoutingTable}.
-     *
-     * @return version of the {@link RoutingTable}
-     */
-    public long version() {
-        return this.version;
-    }
-
     @Override
     public Iterator<IndexRoutingTable> iterator() {
         return indicesRouting.values().iterator();
@@ -331,7 +316,9 @@ public static Diff<RoutingTable> readDiffFrom(StreamInput in) throws IOException
 
     public static RoutingTable readFrom(StreamInput in) throws IOException {
         Builder builder = new Builder();
-        builder.version = in.readLong();
+        if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+            in.readLong(); // previously 'version', unused in all applicable versions so any number will do
+        }
         int size = in.readVInt();
         for (int i = 0; i < size; i++) {
             IndexRoutingTable index = IndexRoutingTable.readFrom(in);
@@ -343,18 +330,17 @@ public static RoutingTable readFrom(StreamInput in) throws IOException {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeLong(version);
+        if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+            out.writeLong(0); // previously 'version', unused in all applicable versions so any number will do
+        }
         out.writeCollection(indicesRouting.values());
     }
 
     private static class RoutingTableDiff implements Diff<RoutingTable> {
 
-        private final long version;
-
         private final Diff<ImmutableOpenMap<String, IndexRoutingTable>> indicesRouting;
 
         RoutingTableDiff(RoutingTable before, RoutingTable after) {
-            version = after.version;
             indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting, DiffableUtils.getStringKeySerializer());
         }
 
@@ -362,27 +348,31 @@ private static class RoutingTableDiff implements Diff<RoutingTable> {
             new DiffableUtils.DiffableValueReader<>(IndexRoutingTable::readFrom, IndexRoutingTable::readDiffFrom);
 
         RoutingTableDiff(StreamInput in) throws IOException {
-            version = in.readLong();
+            if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+                in.readLong(); // previously 'version', unused in all applicable versions so any number will do
+            }
             indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DIFF_VALUE_READER);
         }
 
         @Override
         public RoutingTable apply(RoutingTable part) {
             final ImmutableOpenMap<String, IndexRoutingTable> updatedRouting = indicesRouting.apply(part.indicesRouting);
-            if (part.version == version && updatedRouting == part.indicesRouting) {
+            if (updatedRouting == part.indicesRouting) {
                 return part;
             }
-            return new RoutingTable(version, updatedRouting);
+            return new RoutingTable(updatedRouting);
         }
 
         @Override
        public void writeTo(StreamOutput out) throws IOException {
-            out.writeLong(version);
+            if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
+                out.writeLong(0); // previously 'version', unused in all applicable versions so any number will do
+            }
             indicesRouting.writeTo(out);
         }
     }
 
-    public static RoutingTable of(long version, RoutingNodes routingNodes) {
+    public static RoutingTable of(RoutingNodes routingNodes) {
         Map<String, IndexRoutingTable.Builder> indexRoutingTableBuilders = new HashMap<>();
         for (RoutingNode routingNode : routingNodes) {
             for (ShardRouting shardRoutingEntry : routingNode) {
@@ -404,7 +394,7 @@ public static RoutingTable of(long version, RoutingNodes routingNodes) {
             IndexRoutingTable indexRoutingTable = indexBuilder.build();
             indicesRouting.put(indexRoutingTable.getIndex().getName(), indexRoutingTable);
         }
-        return new RoutingTable(version, indicesRouting.build());
+        return new RoutingTable(indicesRouting.build());
     }
 
     public static Builder builder() {
@@ -429,7 +419,6 @@ public static Builder builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy,
     public static class Builder {
 
         private final ShardRoutingRoleStrategy shardRoutingRoleStrategy;
-        private long version;
         private ImmutableOpenMap.Builder<String, IndexRoutingTable> indicesRouting;
 
         public Builder() {
@@ -447,7 +436,6 @@ public Builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy) {
 
         public Builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy, RoutingTable routingTable) {
             this.shardRoutingRoleStrategy = shardRoutingRoleStrategy;
-            this.version = routingTable.version;
             this.indicesRouting = ImmutableOpenMap.builder(routingTable.indicesRouting);
         }
 
@@ -591,16 +579,6 @@ public Builder remove(String index) {
             return this;
         }
 
-        public Builder version(long version) {
-            this.version = version;
-            return this;
-        }
-
-        public Builder incrementVersion() {
-            this.version++;
-            return this;
-        }
-
         /**
          * Builds the routing table. Note that once this is called the builder
         * must be thrown away. If you need to build a new RoutingTable as a
@@ -610,7 +588,7 @@ public RoutingTable build() {
             if (indicesRouting == null) {
                 throw new IllegalStateException("once build is called the builder cannot be reused");
             }
-            RoutingTable table = new RoutingTable(version, indicesRouting.build());
+            RoutingTable table = new RoutingTable(indicesRouting.build());
             indicesRouting = null;
             return table;
         }
@@ -618,7 +596,7 @@ public RoutingTable build() {
 
     @Override
     public String toString() {
-        StringBuilder sb = new StringBuilder("routing_table (version ").append(version).append("):\n");
+        StringBuilder sb = new StringBuilder("routing_table:\n");
         for (IndexRoutingTable entry : indicesRouting.values()) {
             sb.append(entry.prettyPrint()).append('\n');
         }
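The RoutingTable change above is the usual remove-a-field wire dance: nodes on either side of TransportVersions.ROUTING_TABLE_VERSION_REMOVED still exchange the legacy long, but neither side ever consumes its value. A self-contained sketch of the pattern (the record name and string field are hypothetical; only the TransportVersions constant comes from this patch):

    import java.io.IOException;
    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    record NamesOnly(String name) implements Writeable {

        static NamesOnly readFrom(StreamInput in) throws IOException {
            if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
                in.readLong(); // removed field: discard it when talking to older nodes
            }
            return new NamesOnly(in.readString());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) {
                out.writeLong(0); // placeholder so older peers can still parse the stream
            }
            out.writeString(name);
        }
    }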
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index 17bbc8f20793..cc8f8442f2ec 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -159,7 +159,7 @@ public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting>

 <T> T deserialize(
     ) throws IOException {
         try (
             StreamInput in = registry == null
-                ? serialized.streamInput()
-                : new NamedWriteableAwareStreamInput(serialized.streamInput(), registry)
+                ? new DeduplicateStreamInput(serialized.streamInput(), new DeduplicatorCache())
+                : new DeduplicateNamedWriteableAwareStreamInput(serialized.streamInput(), registry, new DeduplicatorCache())
         ) {
             in.setTransportVersion(serializedAtVersion);
             return reader.read(in);
         }
     }
+
+    /** An object implementing this interface can deduplicate instance of the provided objects.*/
+    public interface Deduplicator {
+        <T> T deduplicate(T object);
+    }
+
+    private static class DeduplicateStreamInput extends FilterStreamInput implements Deduplicator {
+
+        private final Deduplicator deduplicator;
+
+        private DeduplicateStreamInput(StreamInput delegate, Deduplicator deduplicator) {
+            super(delegate);
+            this.deduplicator = deduplicator;
+        }
+
+        @Override
+        public <T> T deduplicate(T object) {
+            return deduplicator.deduplicate(object);
+        }
+    }
+
+    private static class DeduplicateNamedWriteableAwareStreamInput extends NamedWriteableAwareStreamInput implements Deduplicator {
+
+        private final Deduplicator deduplicator;
+
+        private DeduplicateNamedWriteableAwareStreamInput(
+            StreamInput delegate,
+            NamedWriteableRegistry registry,
+            Deduplicator deduplicator
+        ) {
+            super(delegate, registry);
+            this.deduplicator = deduplicator;
+        }
+
+        @Override
+        public <T> T deduplicate(T object) {
+            return deduplicator.deduplicate(object);
+        }
+    }
+
+    /**
+     * Implementation of a {@link Deduplicator} cache. It can hold up to 1024 instances.
+     */
+    private static class DeduplicatorCache implements Deduplicator {
+
+        private static final int MAX_SIZE = 1024;
+        // lazily init
+        private Map<Object, Object> cache = null;
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public <T> T deduplicate(T object) {
+            if (cache == null) {
+                cache = new HashMap<>();
+                cache.put(object, object);
+            } else if (cache.size() < MAX_SIZE) {
+                object = (T) cache.computeIfAbsent(object, o -> o);
+            }
+            return object;
+        }
+    }
 }
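The DeduplicatorCache above is a bounded interning map: the first instance of each distinct value wins, and once the cap is reached objects pass through untouched, even ones equal to an already-cached instance. A standalone sketch of that behaviour (illustrative class name):

    import java.util.HashMap;
    import java.util.Map;

    class InterningCacheSketch {
        private static final int MAX_SIZE = 1024;
        private Map<Object, Object> cache; // lazily initialized: many streams never deduplicate

        @SuppressWarnings("unchecked")
        <T> T deduplicate(T object) {
            if (cache == null) {
                cache = new HashMap<>();
                cache.put(object, object);
            } else if (cache.size() < MAX_SIZE) {
                object = (T) cache.computeIfAbsent(object, o -> o); // first equal instance seen wins
            }
            return object;
        }

        public static void main(String[] args) {
            InterningCacheSketch cache = new InterningCacheSketch();
            String a = new String("field_name");
            String b = new String("field_name"); // equal but distinct instance
            System.out.println(cache.deduplicate(a) == cache.deduplicate(b)); // true
        }
    }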
diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
index acdc3e32ea31..95552fa508f7 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java
@@ -884,24 +884,26 @@ protected StoredFieldsReader doGetSequentialStoredFieldsReader(StoredFieldsReade
             }
         }
 
-        DirectoryReaderWithAllLiveDocs(DirectoryReader in) throws IOException {
-            super(in, new SubReaderWrapper() {
-                @Override
-                public LeafReader wrap(LeafReader leaf) {
-                    final SegmentReader segmentReader = segmentReader(leaf);
-                    final Bits hardLiveDocs = segmentReader.getHardLiveDocs();
-                    if (hardLiveDocs == null) {
-                        return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc());
-                    }
-                    // Once soft-deletes is enabled, we no longer hard-update or hard-delete documents directly.
-                    // Two scenarios that we have hard-deletes: (1) from old segments where soft-deletes was disabled,
-                    // (2) when IndexWriter hits non-aborted exceptions. These two cases, IW flushes SegmentInfos
-                    // before exposing the hard-deletes, thus we can use the hard-delete count of SegmentInfos.
-                    final int numDocs = segmentReader.maxDoc() - segmentReader.getSegmentInfo().getDelCount();
-                    assert numDocs == popCount(hardLiveDocs) : numDocs + " != " + popCount(hardLiveDocs);
-                    return new LeafReaderWithLiveDocs(segmentReader, hardLiveDocs, numDocs);
+        private static final SubReaderWrapper ALL_LIVE_DOCS_SUB_READER_WRAPPER = new SubReaderWrapper() {
+            @Override
+            public LeafReader wrap(LeafReader leaf) {
+                final SegmentReader segmentReader = segmentReader(leaf);
+                final Bits hardLiveDocs = segmentReader.getHardLiveDocs();
+                if (hardLiveDocs == null) {
+                    return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc());
                 }
-            });
+                // Once soft-deletes is enabled, we no longer hard-update or hard-delete documents directly.
+                // Two scenarios that we have hard-deletes: (1) from old segments where soft-deletes was disabled,
+                // (2) when IndexWriter hits non-aborted exceptions. These two cases, IW flushes SegmentInfos
+                // before exposing the hard-deletes, thus we can use the hard-delete count of SegmentInfos.
+                final int numDocs = segmentReader.maxDoc() - segmentReader.getSegmentInfo().getDelCount();
+                assert numDocs == popCount(hardLiveDocs) : numDocs + " != " + popCount(hardLiveDocs);
+                return new LeafReaderWithLiveDocs(segmentReader, hardLiveDocs, numDocs);
+            }
+        };
+
+        DirectoryReaderWithAllLiveDocs(DirectoryReader in) throws IOException {
+            super(in, ALL_LIVE_DOCS_SUB_READER_WRAPPER);
         }
 
         @Override
diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java
index ae8f8cb28da1..c1081f3d6285 100644
--- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java
+++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java
@@ -2323,6 +2323,8 @@ static DateFormatter forPattern(String input) {
         } else if (FormatNames.STRICT_YEAR_MONTH_DAY.matches(input)) {
             return STRICT_YEAR_MONTH_DAY;
         } else {
+            DateUtils.checkTextualDateFormats(input);
+
             try {
                 return newDateFormatter(
                     input,
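The checkTextualDateFormats hook wired in above (implemented in DateUtils below) flags pattern letters whose output is resolved through locale data, which is exactly what shifts between the JDK's COMPAT and CLDR locale providers. A small illustration with plain java.time; the exact abbreviations printed depend on the JDK and provider in use, which is the point:

    import java.time.LocalDate;
    import java.time.format.DateTimeFormatter;
    import java.util.Locale;

    public class TextualPatternSketch {
        public static void main(String[] args) {
            // "MMM" is textual: its output comes from locale data (e.g. German month
            // abbreviations differ between COMPAT and CLDR), so parsing round-trips
            // can break when the provider changes.
            DateTimeFormatter textual = DateTimeFormatter.ofPattern("dd MMM yyyy", Locale.GERMAN);
            System.out.println(textual.format(LocalDate.of(2024, 9, 1)));

            // Purely numeric patterns are stable across providers.
            DateTimeFormatter numeric = DateTimeFormatter.ofPattern("dd-MM-yyyy");
            System.out.println(numeric.format(LocalDate.of(2024, 9, 1)));
        }
    }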
diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java
index 8e98adc18336..e312ce78ea15 100644
--- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java
+++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java
@@ -10,6 +10,9 @@
 
 import org.elasticsearch.common.logging.DeprecationCategory;
 import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.core.Predicates;
+import org.elasticsearch.core.UpdateForV9;
+import org.elasticsearch.logging.LogManager;
 
 import java.time.Clock;
 import java.time.Duration;
@@ -19,6 +22,8 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.function.Predicate;
+import java.util.regex.Pattern;
 
 import static java.util.Map.entry;
 import static org.elasticsearch.common.time.DateUtilsRounding.getMonthOfYear;
@@ -382,4 +387,16 @@ public static ZonedDateTime nowWithMillisResolution(Clock clock) {
         Clock millisResolutionClock = Clock.tick(clock, Duration.ofMillis(1));
         return ZonedDateTime.now(millisResolutionClock);
     }
+
+    // check for all textual fields, and localized zone offset
+    private static final Predicate<String> CONTAINS_CHANGING_TEXT_SPECIFIERS = System.getProperty("java.locale.providers", "")
+        .contains("COMPAT") ? Pattern.compile("[EcGaO]|MMM|LLL|eee|ccc|QQQ|ZZZZ").asPredicate() : Predicates.never();
+
+    @UpdateForV9 // this can be removed, we will only use CLDR on v9
+    static void checkTextualDateFormats(String format) {
+        if (CONTAINS_CHANGING_TEXT_SPECIFIERS.test(format)) {
+            LogManager.getLogger(DateFormatter.class)
+                .warn("Date format [{}] contains textual field specifiers that could change in JDK 23", format);
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java
index b7826ad17add..143d24373e71 100644
--- a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java
+++ b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java
@@ -98,8 +98,6 @@ static ClusterState updateRoutingTable(final ClusterState state, ShardRoutingRol
         for (final IndexMetadata indexMetadata : state.metadata().indices().values()) {
             routingTableBuilder.addAsRecovery(indexMetadata);
         }
-        // start with 0 based versions for routing table
-        routingTableBuilder.version(0);
         return ClusterState.builder(state).routingTable(routingTableBuilder.build()).build();
     }
diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java
index 608d88fdef66..7bc8273eef52 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java
@@ -166,7 +166,7 @@ private static IndexVersion def(int id, Version luceneVersion) {
      * In branches 8.7-8.11 see server/src/main/java/org/elasticsearch/index/IndexVersion.java for the equivalent definitions.
      */
 
-    public static final IndexVersion MINIMUM_COMPATIBLE = V_7_0_0;
+    public static final IndexVersion MINIMUM_COMPATIBLE = V_8_0_0;
 
     static final NavigableMap<Integer, IndexVersion> VERSION_IDS = getAllVersionIds(IndexVersions.class);
     static final IndexVersion LATEST_DEFINED;
@@ -217,8 +217,10 @@ static NavigableMap<Integer, IndexVersion> getAllVersionIds(Class<?> cls) {
         return Collections.unmodifiableNavigableMap(builder);
     }
 
+    @UpdateForV9
+    // We can simplify this once we've removed all references to index versions earlier than MINIMUM_COMPATIBLE
     static Collection<IndexVersion> getAllVersions() {
-        return VERSION_IDS.values();
+        return VERSION_IDS.values().stream().filter(v -> v.onOrAfter(MINIMUM_COMPATIBLE)).toList();
     }
 
     static final IntFunction<String> VERSION_LOOKUP = ReleaseVersions.generateVersionsLookup(IndexVersions.class, LATEST_DEFINED.id());
diff --git a/server/src/main/java/org/elasticsearch/index/codec/DeduplicatingFieldInfosFormat.java b/server/src/main/java/org/elasticsearch/index/codec/DeduplicatingFieldInfosFormat.java
index 75ec265a6839..67d98bf30d6e 100644
--- a/server/src/main/java/org/elasticsearch/index/codec/DeduplicatingFieldInfosFormat.java
+++ b/server/src/main/java/org/elasticsearch/index/codec/DeduplicatingFieldInfosFormat.java
@@ -65,7 +65,7 @@ public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segm
                 fi.isParentField()
             );
         }
-        return new FieldInfos(deduplicated);
+        return new FieldInfosWithUsages(deduplicated);
     }
 
     private static Map<String, String> internStringStringMap(Map<String, String> m) {
diff --git a/server/src/main/java/org/elasticsearch/index/codec/FieldInfosWithUsages.java b/server/src/main/java/org/elasticsearch/index/codec/FieldInfosWithUsages.java
new file mode 100644
index 000000000000..e0d4a94ee28a
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/codec/FieldInfosWithUsages.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.codec;
+
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexOptions;
+
+public class FieldInfosWithUsages extends FieldInfos {
+    private final int totalUsages;
+
+    public FieldInfosWithUsages(FieldInfo[] infos) {
+        super(infos);
+        this.totalUsages = computeUsages(infos);
+    }
+
+    public static int computeUsages(FieldInfo[] infos) {
+        int usages = 0;
+        for (FieldInfo fi : infos) {
+            if (fi.getIndexOptions() != IndexOptions.NONE) {
+                usages++;
+            }
+            if (fi.hasNorms()) {
+                usages++;
+            }
+            if (fi.getDocValuesType() != DocValuesType.NONE) {
+                usages++;
+            }
+            if (fi.getPointDimensionCount() > 0) {
+                usages++;
+            }
+            if (fi.getVectorDimension() > 0) {
+                usages++;
+            }
+        }
+        return usages;
+    }
+
+    public int getTotalUsages() {
+        return totalUsages;
+    }
+}
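A field contributes one "usage" per indexed structure it carries (postings, norms, doc values, points, vectors). How the precomputed total might be consumed (hypothetical helper; only segments decoded through the DeduplicatingFieldInfosFormat above produce a FieldInfosWithUsages):

    import org.apache.lucene.index.LeafReader;

    class FieldUsagesSketch {
        static int totalFieldUsages(LeafReader leafReader) {
            return leafReader.getFieldInfos() instanceof FieldInfosWithUsages withUsages
                ? withUsages.getTotalUsages()
                : -1; // unknown: segment was not read through the deduplicating format
        }
    }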
diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java
index 22bab1742589..43b0c27d3058 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java
@@ -43,6 +43,9 @@ public class CombinedDeletionPolicy extends IndexDeletionPolicy {
     private final SoftDeletesPolicy softDeletesPolicy;
     private final LongSupplier globalCheckpointSupplier;
     private final Map<IndexCommit, Integer> acquiredIndexCommits; // Number of references held against each commit point.
+    // Index commits internally acquired by the commits listener. We want to track them separately to be able to disregard them
+    // when checking for externally acquired index commits that haven't been released
+    private final Set<IndexCommit> internallyAcquiredIndexCommits;
 
     interface CommitsListener {
 
@@ -72,6 +75,7 @@ interface CommitsListener {
         this.globalCheckpointSupplier = globalCheckpointSupplier;
         this.commitsListener = commitsListener;
         this.acquiredIndexCommits = new HashMap<>();
+        this.internallyAcquiredIndexCommits = new HashSet<>();
     }
 
     @Override
@@ -114,7 +118,7 @@ public void onCommit(List<? extends IndexCommit> commits) throws IOException {
             this.maxSeqNoOfNextSafeCommit = Long.parseLong(commits.get(keptPosition + 1).getUserData().get(SequenceNumbers.MAX_SEQ_NO));
         }
         if (commitsListener != null && previousLastCommit != this.lastCommit) {
-            newCommit = acquireIndexCommit(false);
+            newCommit = acquireIndexCommit(false, true);
         } else {
             newCommit = null;
         }
@@ -210,15 +214,25 @@ SafeCommitInfo getSafeCommitInfo() {
      * @param acquiringSafeCommit captures the most recent safe commit point if true; otherwise captures the most recent commit point.
      */
     synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) {
+        return acquireIndexCommit(acquiringSafeCommit, false);
+    }
+
+    private synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit, boolean acquiredInternally) {
         assert safeCommit != null : "Safe commit is not initialized yet";
         assert lastCommit != null : "Last commit is not initialized yet";
         final IndexCommit snapshotting = acquiringSafeCommit ? safeCommit : lastCommit;
         acquiredIndexCommits.merge(snapshotting, 1, Integer::sum); // increase refCount
-        return wrapCommit(snapshotting);
+        assert acquiredInternally == false || internallyAcquiredIndexCommits.add(snapshotting)
+            : "commit [" + snapshotting + "] already added";
+        return wrapCommit(snapshotting, acquiredInternally);
     }
 
     protected IndexCommit wrapCommit(IndexCommit indexCommit) {
-        return new SnapshotIndexCommit(indexCommit);
+        return wrapCommit(indexCommit, false);
+    }
+
+    protected IndexCommit wrapCommit(IndexCommit indexCommit, boolean acquiredInternally) {
+        return new SnapshotIndexCommit(indexCommit, acquiredInternally);
     }
 
     /**
@@ -227,7 +241,8 @@ protected IndexCommit wrapCommit(IndexCommit indexCommit) {
      * @return true if the acquired commit can be clean up.
      */
     synchronized boolean releaseCommit(final IndexCommit acquiredCommit) {
-        final IndexCommit releasingCommit = ((SnapshotIndexCommit) acquiredCommit).getIndexCommit();
+        final SnapshotIndexCommit snapshotIndexCommit = (SnapshotIndexCommit) acquiredCommit;
+        final IndexCommit releasingCommit = snapshotIndexCommit.getIndexCommit();
         assert acquiredIndexCommits.containsKey(releasingCommit)
             : "Release non-acquired commit;"
                 + "acquired commits ["
@@ -242,6 +257,8 @@ synchronized boolean releaseCommit(final IndexCommit acquiredCommit) {
             }
             return count - 1;
         });
+        assert snapshotIndexCommit.acquiredInternally == false || internallyAcquiredIndexCommits.remove(releasingCommit)
+            : "Trying to release a commit [" + releasingCommit + "] that hasn't been previously acquired internally";
         assert refCount == null || refCount > 0 : "Number of references for acquired commit can not be negative [" + refCount + "]";
 
         // The commit can be clean up only if no refCount and it is neither the safe commit nor last commit.
@@ -296,10 +313,16 @@ private static Set<String> listOfNewFileNames(IndexCommit previous, IndexCommit
     }
 
     /**
-     * Checks whether the deletion policy is holding on to acquired index commits
+     * Checks whether the deletion policy is holding on to externally acquired index commits
     */
-    synchronized boolean hasAcquiredIndexCommits() {
-        return acquiredIndexCommits.isEmpty() == false;
+    synchronized boolean hasAcquiredIndexCommitsForTesting() {
+        // We explicitly check only external commits and disregard internal commits acquired by the commits listener
+        for (var e : acquiredIndexCommits.entrySet()) {
+            if (internallyAcquiredIndexCommits.contains(e.getKey()) == false || e.getValue() > 1) {
+                return true;
+            }
+        }
+        return false;
     }
 
     /**
@@ -320,8 +343,12 @@ public static String commitDescription(IndexCommit commit) throws IOException {
      * A wrapper of an index commit that prevents it from being deleted.
      */
     private static class SnapshotIndexCommit extends FilterIndexCommit {
-        SnapshotIndexCommit(IndexCommit delegate) {
+
+        private final boolean acquiredInternally;
+
+        SnapshotIndexCommit(IndexCommit delegate, boolean acquiredInternally) {
             super(delegate);
+            this.acquiredInternally = acquiredInternally;
         }
 
         @Override
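The commit bookkeeping above boils down to a reference-counted map plus a side set for listener-acquired commits. A standalone sketch of just the counting part (illustrative names): merge() bumps the count on acquire, compute() drops it on release and removes the entry once it reaches zero.

    import java.util.HashMap;
    import java.util.Map;

    class CommitRefCountsSketch<T> {
        private final Map<T, Integer> acquired = new HashMap<>();

        void acquire(T commit) {
            acquired.merge(commit, 1, Integer::sum);
        }

        /** Returns true when the last reference to this commit was released. */
        boolean release(T commit) {
            Integer remaining = acquired.compute(commit, (c, count) -> count == null || count == 1 ? null : count - 1);
            return remaining == null;
        }
    }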
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index b07132eea75e..8dec3fadd8a0 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -81,6 +81,7 @@
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogStats;
+import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.search.suggest.completion.CompletionStats;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.Transports;
@@ -139,6 +140,7 @@ public abstract class Engine implements Closeable {
     protected final EventListener eventListener;
     protected final ReentrantLock failEngineLock = new ReentrantLock();
     protected final SetOnce<String> failedEngine = new SetOnce<>();
+    protected final boolean enableRecoverySource;
 
     private final AtomicBoolean isClosing = new AtomicBoolean();
     private final SubscribableListener<Void> drainOnCloseListener = new SubscribableListener<>();
@@ -167,6 +169,9 @@ protected Engine(EngineConfig engineConfig) {
         // we use the engine class directly here to make sure all subclasses have the same logger name
         this.logger = Loggers.getLogger(Engine.class, engineConfig.getShardId());
         this.eventListener = engineConfig.getEventListener();
+        this.enableRecoverySource = RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.get(
+            engineConfig.getIndexSettings().getSettings()
+        );
     }
 
     /**
@@ -279,7 +284,7 @@ private long getDenseVectorValueCount(final LeafReader atomicReader, List<FieldInfo>
-            if (info.getVectorDimension() > 0) {
+            if (info != null && info.getVectorDimension() > 0) {
                 switch (info.getVectorEncoding()) {
                     case FLOAT32 -> {
                         FloatVectorValues values = atomicReader.getFloatVectorValues(info.name);
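The enableRecoverySource flag read once in the Engine constructor above is enforced in InternalEngine below when a changes snapshot is requested. A self-contained sketch of that shape (the setting key here is a hypothetical stand-in; only the setting's existence comes from this patch):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    class RecoverySourceGateSketch {
        // Hypothetical stand-in for RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING
        static final Setting<Boolean> RECOVERY_SOURCE_ENABLED =
            Setting.boolSetting("indices.recovery.recovery_source.enabled", true, Setting.Property.IndexScope);

        private final boolean enableRecoverySource;

        RecoverySourceGateSketch(Settings indexSettings) {
            this.enableRecoverySource = RECOVERY_SOURCE_ENABLED.get(indexSettings); // evaluated once at construction
        }

        void ensureChangesSnapshotsAllowed() {
            if (enableRecoverySource == false) {
                throw new IllegalStateException("changes snapshots require " + RECOVERY_SOURCE_ENABLED.getKey());
            }
        }
    }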
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 9e0fbd0bb691..7c456f55ac8a 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -98,6 +98,7 @@
 import org.elasticsearch.index.translog.TranslogCorruptedException;
 import org.elasticsearch.index.translog.TranslogDeletionPolicy;
 import org.elasticsearch.index.translog.TranslogStats;
+import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.search.suggest.completion.CompletionStats;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -668,8 +669,8 @@ Translog getTranslog() {
     }
 
     // Package private for testing purposes only
-    boolean hasAcquiredIndexCommits() {
-        return combinedDeletionPolicy.hasAcquiredIndexCommits();
+    boolean hasAcquiredIndexCommitsForTesting() {
+        return combinedDeletionPolicy.hasAcquiredIndexCommitsForTesting();
     }
 
     @Override
@@ -3130,6 +3131,13 @@ public Translog.Snapshot newChangesSnapshot(
         boolean singleConsumer,
         boolean accessStats
     ) throws IOException {
+        if (enableRecoverySource == false) {
+            throw new IllegalStateException(
+                "Changes snapshot are unavailable when the "
+                    + RecoverySettings.INDICES_RECOVERY_SOURCE_ENABLED_SETTING.getKey()
+                    + " setting is disabled."
+            );
+        }
         ensureOpen();
         refreshIfNeeded(source, toSeqNo);
         Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java
index 39f4a3a82c5c..1065e67dccf4 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java
@@ -201,32 +201,17 @@ protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) {
     protected AbstractGeometryFieldMapper(
         String simpleName,
         MappedFieldType mappedFieldType,
+        BuilderParams builderParams,
         Explicit<Boolean> ignoreMalformed,
         Explicit<Boolean> ignoreZValue,
-        MultiFields multiFields,
-        CopyTo copyTo,
         Parser<T> parser
     ) {
-        super(simpleName, mappedFieldType, multiFields, copyTo, false, null);
+        super(simpleName, mappedFieldType, builderParams);
         this.ignoreMalformed = ignoreMalformed;
         this.ignoreZValue = ignoreZValue;
         this.parser = parser;
     }
 
-    protected AbstractGeometryFieldMapper(
-        String simpleName,
-        MappedFieldType mappedFieldType,
-        MultiFields multiFields,
-        CopyTo copyTo,
-        Parser<T> parser,
-        OnScriptError onScriptError
-    ) {
-        super(simpleName, mappedFieldType, multiFields, copyTo, true, onScriptError);
-        this.ignoreMalformed = Explicit.EXPLICIT_FALSE;
-        this.ignoreZValue = Explicit.EXPLICIT_FALSE;
-        this.parser = parser;
-    }
-
     @Override
     @SuppressWarnings("unchecked")
     public AbstractGeometryFieldType<T> fieldType() {
@@ -252,7 +237,7 @@ protected boolean supportsParsingObject() {
 
     @Override
     public final void parse(DocumentParserContext context) throws IOException {
-        if (hasScript) {
+        if (builderParams.hasScript()) {
             throw new DocumentParsingException(
                 context.parser().getTokenLocation(),
                 "failed to parse field [" + fieldType().name() + "] of type + " + contentType() + "]",
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java
index 2b4ecc8f0a89..23005289e729 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java
@@ -40,29 +40,16 @@ public static Parameter<T> nullValueParam(
     protected AbstractPointGeometryFieldMapper(
         String simpleName,
         MappedFieldType mappedFieldType,
-        MultiFields multiFields,
+        BuilderParams builderParams,
         Explicit<Boolean> ignoreMalformed,
         Explicit<Boolean> ignoreZValue,
         T nullValue,
-        CopyTo copyTo,
         Parser<T> parser
     ) {
-        super(simpleName, mappedFieldType, ignoreMalformed, ignoreZValue, multiFields, copyTo, parser);
+        super(simpleName, mappedFieldType, builderParams, ignoreMalformed, ignoreZValue, parser);
         this.nullValue = nullValue;
     }
 
-    protected AbstractPointGeometryFieldMapper(
-        String simpleName,
-        MappedFieldType mappedFieldType,
-        MultiFields multiFields,
-        CopyTo copyTo,
-        Parser<T> parser,
-        OnScriptError onScriptError
-    ) {
-        super(simpleName, mappedFieldType, multiFields, copyTo, parser, onScriptError);
-        this.nullValue = null;
-    }
-
     public T getNullValue() {
         return nullValue;
     }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java
index cf453bd1571b..92cb1a0d85d8 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java
@@ -244,7 +244,7 @@ protected abstract static class Builder<Factory> extends RuntimeField.Builder {
         ).setSerializerCheck((id, ic, v) -> ic);
 
         private final FieldMapper.Parameter<OnScriptError> onScriptError = FieldMapper.Parameter.onScriptErrorParam(
-            m -> m.onScriptError,
+            m -> m.builderParams.onScriptError(),
             script
         );
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java
index 619c6c6613d5..ef20a40f6e9d 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java
@@ -76,30 +76,14 @@ protected Object nullValueAsSource(T nullValue) {
     protected AbstractShapeGeometryFieldMapper(
         String simpleName,
         MappedFieldType mappedFieldType,
+        BuilderParams builderParams,
         Explicit<Boolean> ignoreMalformed,
         Explicit<Boolean> coerce,
         Explicit<Boolean> ignoreZValue,
         Explicit<Orientation> orientation,
-        MultiFields multiFields,
-        CopyTo copyTo,
         Parser<T> parser
     ) {
-        super(simpleName, mappedFieldType, ignoreMalformed, ignoreZValue, multiFields, copyTo, parser);
-        this.coerce = coerce;
-        this.orientation = orientation;
-    }
-
-    protected AbstractShapeGeometryFieldMapper(
-        String simpleName,
-        MappedFieldType mappedFieldType,
-        MultiFields multiFields,
-        Explicit<Boolean> coerce,
-        Explicit<Orientation> orientation,
-        CopyTo copyTo,
-        Parser<T> parser,
-        OnScriptError onScriptError
-    ) {
-        super(simpleName, mappedFieldType, multiFields, copyTo, parser, onScriptError);
+        super(simpleName, mappedFieldType, builderParams, ignoreMalformed, ignoreZValue, parser);
         this.coerce = coerce;
         this.orientation = orientation;
     }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java
index 1891e19b6d03..8d9dd99092a0 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java
@@ -72,8 +72,7 @@ public BinaryFieldMapper build(MapperBuilderContext context) {
             return new BinaryFieldMapper(
                 leafName(),
                 new BinaryFieldType(context.buildFullName(leafName()), stored.getValue(), hasDocValues.getValue(), meta.getValue()),
-                multiFieldsBuilder.build(this, context),
-                copyTo,
+                builderParams(this, context),
                 this
             );
         }
@@ -142,14 +141,8 @@ public Query termQuery(Object value, SearchExecutionContext context) {
     private final boolean hasDocValues;
     private final boolean isSyntheticSourceEnabledViaIndexMode;
 
-    protected BinaryFieldMapper(
-        String simpleName,
-        MappedFieldType mappedFieldType,
-        MultiFields multiFields,
-        CopyTo copyTo,
-        Builder builder
-    ) {
-        super(simpleName, mappedFieldType, multiFields, copyTo);
+    protected BinaryFieldMapper(String simpleName, MappedFieldType mappedFieldType, BuilderParams builderParams, Builder builder) {
+        super(simpleName, mappedFieldType, builderParams);
         this.stored = builder.stored.getValue();
         this.hasDocValues = builder.hasDocValues.getValue();
         this.isSyntheticSourceEnabledViaIndexMode = builder.isSyntheticSourceEnabledViaIndexMode;
@@ -201,54 +194,40 @@ protected String contentType() {
     }
 
     @Override
-    protected SyntheticSourceMode syntheticSourceMode() {
-        return SyntheticSourceMode.NATIVE;
-    }
-
-    @Override
-    public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() {
-        if (copyTo.copyToFields().isEmpty() != true) {
-            throw new IllegalArgumentException(
-                "field [" + fullPath() + "] of type [" + typeName() + "] doesn't support synthetic source because it declares copy_to"
-            );
-        }
-        if (hasDocValues == false) {
-            throw new IllegalArgumentException(
-                "field ["
-                    + fullPath()
-                    + "] of type ["
-                    + typeName()
-                    + "] doesn't support synthetic source because it doesn't have doc values"
-            );
-        }
-
-        return new BinaryDocValuesSyntheticFieldLoader(fullPath()) {
-            @Override
-            protected void writeValue(XContentBuilder b, BytesRef value) throws IOException {
-                var in = new ByteArrayStreamInput();
-                in.reset(value.bytes, value.offset, value.length);
-
-                int count = in.readVInt();
-                switch (count) {
-                    case 0:
-                        return;
-                    case 1:
-                        b.field(leafName());
-                        break;
-                    default:
-                        b.startArray(leafName());
+    protected SyntheticSourceSupport syntheticSourceSupport() {
+        if (hasDocValues) {
+            var loader = new BinaryDocValuesSyntheticFieldLoader(fullPath()) {
+                @Override
+                protected void writeValue(XContentBuilder b, BytesRef value) throws IOException {
+                    var in = new ByteArrayStreamInput();
+                    in.reset(value.bytes, value.offset, value.length);
+
+                    int count = in.readVInt();
+                    switch (count) {
+                        case 0:
+                            return;
+                        case 1:
+                            b.field(leafName());
+                            break;
+                        default:
+                            b.startArray(leafName());
                     }
+
+                    for (int i = 0; i < count; i++) {
+                        byte[] bytes = in.readByteArray();
+                        b.value(Base64.getEncoder().encodeToString(bytes));
+                    }
+
+                    if (count > 1) {
+                        b.endArray();
+                    }
+                }
+            };
 
-                for (int i = 0; i < count; i++) {
-                    byte[] bytes = in.readByteArray();
-                    b.value(Base64.getEncoder().encodeToString(bytes));
-                }
+            return new SyntheticSourceSupport.Native(loader);
 
-                if (count > 1) {
-                    b.endArray();
-                }
-            }
-        };
+        }
+
+        return super.syntheticSourceSupport();
     }
 
     public static final class CustomBinaryDocValuesField extends CustomDocValuesField {
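The writeValue implementation above decodes the stored layout of a binary field's doc values: a vInt count followed by length-prefixed byte arrays, each rendered as Base64. The same decode as a standalone sketch (hypothetical helper class; the encoding assumption matches the loop above):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Base64;
    import java.util.List;
    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.io.stream.ByteArrayStreamInput;

    class BinaryDocValuesDecodeSketch {
        static List<String> decode(BytesRef value) throws IOException {
            ByteArrayStreamInput in = new ByteArrayStreamInput();
            in.reset(value.bytes, value.offset, value.length);
            int count = in.readVInt(); // number of values for this document
            List<String> values = new ArrayList<>(count);
            for (int i = 0; i < count; i++) {
                values.add(Base64.getEncoder().encodeToString(in.readByteArray()));
            }
            return values;
        }
    }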
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
index 1f0088ec9647..e450d3916fc6 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java
@@ -89,7 +89,10 @@ public static final class Builder extends FieldMapper.DimensionBuilder {
         ).acceptsNull();
 
         private final Parameter